CpuGeneratorsAVXFMA.cpp
#include "CpuGenerators.h"

#include <immintrin.h>
#include <omp.h>

#include <cmath>
#include <utility>
#include <memory>

using mnd::CpuGenerator;

namespace mnd
{
    template class CpuGenerator<float, mnd::X86_AVX_FMA, false>;
    template class CpuGenerator<float, mnd::X86_AVX_FMA, true>;
    template class CpuGenerator<double, mnd::X86_AVX_FMA, false>;
    template class CpuGenerator<double, mnd::X86_AVX_FMA, true>;
    template class CpuGenerator<DoubleDouble, mnd::X86_AVX_FMA, false>;
    template class CpuGenerator<DoubleDouble, mnd::X86_AVX_FMA, true>;
}
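// Single-precision generator using AVX + FMA. Each inner-loop iteration
// computes 16 pixels at once: two __m256 registers of 8 lanes each are iterated
// side by side to keep the FMA units busy. `counter` accumulates the per-lane
// iteration count; with smoothing enabled, `resultsa`/`resultsb` keep the last
// z value whose squared magnitude was still below the escape threshold.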
template<bool parallel>
void CpuGenerator<float, mnd::X86_AVX_FMA, parallel>::generate(const mnd::MandelInfo& info, float* data)
{
    using T = float;
    const MandelViewport& view = info.view;

    const float dppf = float(view.width / info.bWidth);
    const float viewxf = float(view.x);
    __m256 viewx = { viewxf, viewxf, viewxf, viewxf, viewxf, viewxf, viewxf, viewxf };
    __m256 dpp = { dppf, dppf, dppf, dppf, dppf, dppf, dppf, dppf };

    T jX = mnd::convert<T>(info.juliaX);
    T jY = mnd::convert<T>(info.juliaY);
    __m256 juliaX = { jX, jX, jX, jX, jX, jX, jX, jX };
    __m256 juliaY = { jY, jY, jY, jY, jY, jY, jY, jY };

    if constexpr(parallel)
        omp_set_num_threads(omp_get_num_procs());
#pragma omp parallel for schedule(static, 1) if (parallel)
    for (long j = 0; j < info.bHeight; j++) {
        T y = T(view.y) + T(j) * T(view.height / info.bHeight);
        __m256 ys = { y, y, y, y, y, y, y, y };
        long i = 0;
        for (; i < info.bWidth; i += 16) {
            __m256 pixc = { float(i), float(i + 1), float(i + 2), float(i + 3), float(i + 4), float(i + 5), float(i + 6), float(i + 7) };
            __m256 pixc2 = { float(i + 8), float(i + 9), float(i + 10), float(i + 11), float(i + 12), float(i + 13), float(i + 14), float(i + 15) };

            __m256 xs = _mm256_add_ps(_mm256_mul_ps(dpp, pixc), viewx);
            __m256 xs2 = _mm256_add_ps(_mm256_mul_ps(dpp, pixc2), viewx);

            __m256 counter = { 0, 0, 0, 0, 0, 0, 0, 0 };
            __m256 adder = { 1, 1, 1, 1, 1, 1, 1, 1 };
            __m256 resultsa = { 0, 0, 0, 0, 0, 0, 0, 0 };
            __m256 resultsb = { 0, 0, 0, 0, 0, 0, 0, 0 };
            __m256 counter2 = { 0, 0, 0, 0, 0, 0, 0, 0 };
            __m256 adder2 = { 1, 1, 1, 1, 1, 1, 1, 1 };
            __m256 resultsa2 = { 0, 0, 0, 0, 0, 0, 0, 0 };
            __m256 resultsb2 = { 0, 0, 0, 0, 0, 0, 0, 0 };

            __m256 threshold = { 16.0f, 16.0f, 16.0f, 16.0f, 16.0f, 16.0f, 16.0f, 16.0f };
            __m256 two = { 2, 2, 2, 2, 2, 2, 2, 2 };

            __m256 a = xs;
            __m256 a2 = xs2;
            __m256 b = ys;
            __m256 b2 = ys;

            __m256 cx = info.julia ? juliaX : xs;
            __m256 cx2 = info.julia ? juliaX : xs2;
            __m256 cy = info.julia ? juliaY : ys;
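
            // z <- z^2 + c, in two loop variants: the smooth path additionally
            // records the last z whose |z|^2 was still below the threshold so a
            // continuous iteration count can be derived afterwards.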
            if (info.smooth) {
                for (int k = 0; k < info.maxIter; k++) {
                    __m256 bb = _mm256_mul_ps(b, b);
                    __m256 bb2 = _mm256_mul_ps(b2, b2);
                    __m256 ab = _mm256_mul_ps(a, b);
                    __m256 ab2 = _mm256_mul_ps(a2, b2);
                    a = _mm256_add_ps(_mm256_fmsub_ps(a, a, bb), cx);
                    a2 = _mm256_add_ps(_mm256_fmsub_ps(a2, a2, bb2), cx2);
                    b = _mm256_fmadd_ps(two, ab, cy);
                    b2 = _mm256_fmadd_ps(two, ab2, cy);
                    __m256 cmp = _mm256_cmp_ps(_mm256_fmadd_ps(a, a, bb), threshold, _CMP_LE_OQ);
                    __m256 cmp2 = _mm256_cmp_ps(_mm256_fmadd_ps(a2, a2, bb2), threshold, _CMP_LE_OQ);
                    resultsa = _mm256_or_ps(_mm256_andnot_ps(cmp, resultsa), _mm256_and_ps(cmp, a));
                    resultsb = _mm256_or_ps(_mm256_andnot_ps(cmp, resultsb), _mm256_and_ps(cmp, b));
                    resultsa2 = _mm256_or_ps(_mm256_andnot_ps(cmp2, resultsa2), _mm256_and_ps(cmp2, a2));
                    resultsb2 = _mm256_or_ps(_mm256_andnot_ps(cmp2, resultsb2), _mm256_and_ps(cmp2, b2));
                    adder = _mm256_and_ps(adder, cmp);
                    counter = _mm256_add_ps(counter, adder);
                    adder2 = _mm256_and_ps(adder2, cmp2);
                    counter2 = _mm256_add_ps(counter2, adder2);
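                    // Every 8th iteration, stop early once all 16 lanes have escaped.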
                    if ((k & 0x7) == 0 && _mm256_testz_ps(cmp, cmp) != 0 && _mm256_testz_ps(cmp2, cmp2) != 0) {
                        break;
                    }
                }
            }
            else {
                for (int k = 0; k < info.maxIter; k++) {
                    __m256 aa = _mm256_mul_ps(a, a);
                    __m256 aa2 = _mm256_mul_ps(a2, a2);
                    __m256 bb = _mm256_mul_ps(b, b);
                    __m256 bb2 = _mm256_mul_ps(b2, b2);
                    __m256 abab = _mm256_mul_ps(a, b); abab = _mm256_add_ps(abab, abab);
                    __m256 abab2 = _mm256_mul_ps(a2, b2); abab2 = _mm256_add_ps(abab2, abab2);
                    a = _mm256_add_ps(_mm256_sub_ps(aa, bb), cx);
                    a2 = _mm256_add_ps(_mm256_sub_ps(aa2, bb2), cx2);
                    b = _mm256_add_ps(abab, cy);
                    b2 = _mm256_add_ps(abab2, cy);
                    __m256 cmp = _mm256_cmp_ps(_mm256_add_ps(aa, bb), threshold, _CMP_LE_OQ);
                    __m256 cmp2 = _mm256_cmp_ps(_mm256_add_ps(aa2, bb2), threshold, _CMP_LE_OQ);
                    adder = _mm256_and_ps(adder, cmp);
                    counter = _mm256_add_ps(counter, adder);
                    adder2 = _mm256_and_ps(adder2, cmp2);
                    counter2 = _mm256_add_ps(counter2, adder2);
                    if ((k & 0x7) == 0 && _mm256_testz_ps(cmp, cmp) != 0 && _mm256_testz_ps(cmp2, cmp2) != 0) {
                        break;
                    }
                }
            }
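
            // Spill the vector results to an aligned scratch buffer and convert
            // them to per-pixel values. With smoothing, the integer count n is
            // turned into a continuous escape count: n + 1 - log2(log |z|).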
            auto alignVec = [](float* data) -> float* {
                void* aligned = data;
                ::size_t length = 64;
                std::align(32, 8 * sizeof(float), aligned, length);
                return static_cast<float*>(aligned);
            };

            float resData[64];
            float* ftRes = alignVec(resData);
            float* resa = ftRes + 16;
            float* resb = resa + 16;

            _mm256_store_ps(ftRes, counter);
            _mm256_store_ps(ftRes + 8, counter2);
            _mm256_store_ps(resa, resultsa);
            _mm256_store_ps(resa + 8, resultsa2);
            _mm256_store_ps(resb, resultsb);
            _mm256_store_ps(resb + 8, resultsb2);

            for (int k = 0; k < 16 && i + k < info.bWidth; k++) {
                if (info.smooth) {
                    data[i + k + j * info.bWidth] = ftRes[k] <= 0 ? info.maxIter :
                        ftRes[k] >= info.maxIter ? info.maxIter :
                        ((float) ftRes[k]) + 1 - ::log(::log(resa[k] * resa[k] + resb[k] * resb[k]) / 2) / ::log(2.0f);
                }
                else {
                    data[i + k + j * info.bWidth] = ftRes[k] <= 0 ? info.maxIter : ftRes[k];
                }
            }
        }
    }
}
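
// Double-precision generator using AVX + FMA. Analogous to the float version,
// but with 4-lane __m256d registers; 8 pixels are processed per inner-loop
// iteration (two registers of 4 lanes each).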
template<bool parallel>
void CpuGenerator<double, mnd::X86_AVX_FMA, parallel>::generate(const mnd::MandelInfo& info, float* data)
{
    using T = double;
    const MandelViewport& view = info.view;

    const double dppf = double(view.width / info.bWidth);
    const double viewxf = double(view.x);
    __m256d viewx = { viewxf, viewxf, viewxf, viewxf };
    __m256d dpp = { dppf, dppf, dppf, dppf };

    T jX = mnd::convert<T>(info.juliaX);
    T jY = mnd::convert<T>(info.juliaY);
    __m256d juliaX = { jX, jX, jX, jX };
    __m256d juliaY = { jY, jY, jY, jY };

    if constexpr(parallel)
        omp_set_num_threads(omp_get_num_procs());
#pragma omp parallel for schedule(static, 1) if (parallel)
    for (long j = 0; j < info.bHeight; j++) {
        T y = T(view.y + T(j) * view.height / info.bHeight);
        __m256d ys = { y, y, y, y };
        long i = 0;
        for (; i < info.bWidth; i += 8) {
            __m256d pixc = { double(i), double(i + 1), double(i + 2), double(i + 3) };
            __m256d pixc2 = { double(i + 4), double(i + 5), double(i + 6), double(i + 7) };
            __m256d xs = _mm256_fmadd_pd(dpp, pixc, viewx);
            __m256d xs2 = _mm256_fmadd_pd(dpp, pixc2, viewx);

            int itRes[4] = { 0, 0, 0, 0 };

            __m256d threshold = { 16.0, 16.0, 16.0, 16.0 };
            __m256d counter = { 0, 0, 0, 0 };
            __m256d adder = { 1, 1, 1, 1 };
            __m256d counter2 = { 0, 0, 0, 0 };
            __m256d adder2 = { 1, 1, 1, 1 };
            __m256d two = { 2, 2, 2, 2 };

            __m256d resultsa = { 0, 0, 0, 0 };
            __m256d resultsb = { 0, 0, 0, 0 };
            __m256d resultsa2 = { 0, 0, 0, 0 };
            __m256d resultsb2 = { 0, 0, 0, 0 };

            __m256d a = xs;
            __m256d b = ys;
            __m256d a2 = xs2;
            __m256d b2 = ys;

            __m256d cx = info.julia ? juliaX : xs;
            __m256d cy = info.julia ? juliaY : ys;
            __m256d cx2 = info.julia ? juliaX : xs2;
            //__m256d cy2 = info.julia ? juliaY : ys;
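
            // a + bi <- (a + bi)^2 + c, using fused multiply-subtract/add for the
            // real and imaginary updates. cy2 would always equal cy (both halves
            // share the same row), so cy is reused for b2.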
            for (int k = 0; k < info.maxIter; k++) {
                __m256d ab = _mm256_mul_pd(a, b);
                __m256d bb = _mm256_mul_pd(b, b);
                __m256d ab2 = _mm256_mul_pd(a2, b2);
                __m256d bb2 = _mm256_mul_pd(b2, b2);
                a = _mm256_fmsub_pd(a, a, bb);
                a = _mm256_add_pd(a, cx);
                a2 = _mm256_fmsub_pd(a2, a2, bb2);
                a2 = _mm256_add_pd(a2, cx2);
                b = _mm256_fmadd_pd(two, ab, cy);
                b2 = _mm256_fmadd_pd(two, ab2, cy);
                __m256d cmp = _mm256_cmp_pd(_mm256_fmadd_pd(a, a, bb), threshold, _CMP_LE_OQ);
                __m256d cmp2 = _mm256_cmp_pd(_mm256_fmadd_pd(a2, a2, bb2), threshold, _CMP_LE_OQ);
                if (info.smooth) {
                    resultsa = _mm256_or_pd(_mm256_andnot_pd(cmp, resultsa), _mm256_and_pd(cmp, a));
                    resultsb = _mm256_or_pd(_mm256_andnot_pd(cmp, resultsb), _mm256_and_pd(cmp, b));
                    resultsa2 = _mm256_or_pd(_mm256_andnot_pd(cmp2, resultsa2), _mm256_and_pd(cmp2, a2));
                    resultsb2 = _mm256_or_pd(_mm256_andnot_pd(cmp2, resultsb2), _mm256_and_pd(cmp2, b2));
                }
                adder = _mm256_and_pd(adder, cmp);
                adder2 = _mm256_and_pd(adder2, cmp2);
                counter = _mm256_add_pd(counter, adder);
                counter2 = _mm256_add_pd(counter2, adder2);
                if ((k & 0x7) == 0 && _mm256_testz_si256(_mm256_castpd_si256(cmp), _mm256_castpd_si256(cmp)) != 0 &&
                        _mm256_testz_si256(_mm256_castpd_si256(cmp2), _mm256_castpd_si256(cmp2)) != 0) {
                    break;
                }
            }
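
            // Write back the two 4-lane halves separately; the second half is
            // offset by 4 pixels, so i is temporarily advanced for its loop.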
            auto alignVec = [](double* data) -> double* {
                void* aligned = data;
                ::size_t length = 64;
                std::align(32, 4 * sizeof(double), aligned, length);
                return static_cast<double*>(aligned);
            };

            double resData[8];
            double* ftRes = alignVec(resData);
            double* resa = (double*) &resultsa;
            double* resb = (double*) &resultsb;

            _mm256_store_pd(ftRes, counter);
            for (int k = 0; k < 4 && i + k < info.bWidth; k++) {
                if (info.smooth)
                    data[i + k + j * info.bWidth] = ftRes[k] <= 0 ? info.maxIter :
                        ftRes[k] >= info.maxIter ? info.maxIter :
                        ((float) ftRes[k]) + 1 - ::log(::log(resa[k] * resa[k] + resb[k] * resb[k]) / 2) / ::log(2.0f);
                else
                    data[i + k + j * info.bWidth] = ftRes[k] > 0 ? float(ftRes[k]) : info.maxIter;
            }

            resa = (double*) &resultsa2;
            resb = (double*) &resultsb2;
            _mm256_store_pd(ftRes, counter2);
            i += 4;
            for (int k = 0; k < 4 && i + k < info.bWidth; k++) {
                if (info.smooth)
                    data[i + k + j * info.bWidth] = ftRes[k] <= 0 ? info.maxIter :
                        ftRes[k] >= info.maxIter ? info.maxIter :
                        ((float) ftRes[k]) + 1 - ::log(::log(resa[k] * resa[k] + resb[k] * resb[k]) / 2) / ::log(2.0f);
                else
                    data[i + k + j * info.bWidth] = ftRes[k] > 0 ? float(ftRes[k]) : info.maxIter;
            }
            i -= 4;
        }
    }
}
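
// --- Vectorized double-double arithmetic ---
// The DoubleDouble generator represents each value as an unevaluated sum of two
// doubles (roughly 106 bits of significand). The helpers below are the standard
// error-free transformations (Dekker/Knuth style): quickTwoSum assumes |a| >= |b|,
// twoSum/twoDiff are the branch-free variants, and twoProd uses FMA to obtain the
// exact rounding error of a product in a single instruction.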
struct VecPair
{
    __m256d a;
    __m256d b;
};

static inline VecPair quickTwoSum(__m256d a, __m256d b)
{
    __m256d s = _mm256_add_pd(a, b);
    __m256d e = _mm256_sub_pd(b, _mm256_sub_pd(s, a));
    return { s, e };
}

static inline VecPair quickTwoDiff(__m256d a, __m256d b)
{
    __m256d s = _mm256_sub_pd(a, b);
    __m256d e = _mm256_sub_pd(_mm256_sub_pd(a, s), b);
    return { s, e };
}

static inline VecPair twoSum(__m256d a, __m256d b)
{
    __m256d s = _mm256_add_pd(a, b);
    __m256d bb = _mm256_sub_pd(s, a);
    __m256d e = _mm256_add_pd(_mm256_sub_pd(a, _mm256_sub_pd(s, bb)), _mm256_sub_pd(b, bb));
    return { s, e };
}

static inline VecPair twoDiff(__m256d a, __m256d b)
{
    __m256d s = _mm256_sub_pd(a, b);
    __m256d bb = _mm256_sub_pd(s, a);
    __m256d e = _mm256_sub_pd(_mm256_sub_pd(a, _mm256_sub_pd(s, bb)), _mm256_add_pd(b, bb));
    return { s, e };
}

/*
static inline VecPair split(__m256d a)
{
    static const __m256d SPLIT_THRESH = { 6.69692879491417e+299, 6.69692879491417e+299, 6.69692879491417e+299, 6.69692879491417e+299 };
    static const __m256d MINUS_SPLIT_THRESH = { -6.69692879491417e+299, -6.69692879491417e+299, -6.69692879491417e+299, -6.69692879491417e+299 };
    static const __m256d SPLITTER = { 134217729.0, 134217729.0, 134217729.0, 134217729.0 };
    __m256d temp;
    __m256i cmp1 = _mm256_castpd_si256(_mm256_cmp_pd(a, SPLIT_THRESH, _CMP_GT_OQ));
    __m256i cmp2 = _mm256_castpd_si256(_mm256_cmp_pd(a, MINUS_SPLIT_THRESH, _CMP_LT_OQ));
    __m256i cmp = _mm256_or_si256
}*/

static inline VecPair twoProd(__m256d a, __m256d b)
{
//#ifdef CPUID_FMA
    __m256d p = _mm256_mul_pd(a, b);
    __m256d e = _mm256_fmsub_pd(a, b, p);
    return { p, e };
//#else
/*  double a_hi, a_lo, b_hi, b_lo;
    __m256d p = _mm256_mul_ps(a, b);
    split(a, a_hi, a_lo);
    split(b, b_hi, b_lo);
    err = ((a_hi * b_hi - p) + a_hi * b_lo + a_lo * b_hi) + a_lo * b_lo;
    return p; */
//#endif
}
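
// Four double-double numbers packed into two __m256d registers: x[0] holds the
// high parts, x[1] the low parts. Only the operators needed for z^2 + c are
// implemented (+, -, *).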
struct AvxDoubleDouble
{
    __m256d x[2];

    inline AvxDoubleDouble(__m256d a, __m256d b) :
        x{ a, b }
    {}

    inline AvxDoubleDouble operator + (const AvxDoubleDouble& sm) const
    {
        auto [s, e] = twoSum(x[0], sm.x[0]);
        e = _mm256_add_pd(e, _mm256_add_pd(x[1], sm.x[1]));
        auto [r1, r2] = quickTwoSum(s, e);
        return AvxDoubleDouble{ r1, r2 };
    }

    inline AvxDoubleDouble operator - (const AvxDoubleDouble& sm) const
    {
        auto [s, e] = twoDiff(x[0], sm.x[0]);
        e = _mm256_add_pd(e, x[1]);
        e = _mm256_sub_pd(e, sm.x[1]);
        auto [r1, r2] = quickTwoSum(s, e);
        return AvxDoubleDouble{ r1, r2 };
    }

    inline AvxDoubleDouble operator * (const AvxDoubleDouble& sm) const
    {
        auto [p1, p2] = twoProd(this->x[0], sm.x[0]);
        p2 = _mm256_add_pd(p2,
                _mm256_add_pd(_mm256_mul_pd(sm.x[1], x[0]), _mm256_mul_pd(sm.x[0], x[1])) );
        auto [r1, r2] = quickTwoSum(p1, p2);
        return AvxDoubleDouble{ r1, r2 };
    }
};
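
// Double-double generator using AVX + FMA. Processes 4 pixels per inner-loop
// iteration; the escape test and the smoothing data only use the high words of
// the double-double values, which is sufficient for comparing against the
// threshold.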
template<bool parallel>
void CpuGenerator<mnd::DoubleDouble, mnd::X86_AVX_FMA, parallel>::generate(const mnd::MandelInfo& info, float* data)
{
    const MandelViewport& view = info.view;

    using T = DoubleDouble;

    T viewx = mnd::convert<T>(view.x);
    T viewy = mnd::convert<T>(view.y);
    T wpp = mnd::convert<T>(view.width / info.bWidth);
    T hpp = mnd::convert<T>(view.height / info.bHeight);

    T jX = mnd::convert<T>(info.juliaX);
    T jY = mnd::convert<T>(info.juliaY);
    AvxDoubleDouble juliaX = { __m256d{ jX.x[0], jX.x[0], jX.x[0], jX.x[0] }, __m256d{ jX.x[1], jX.x[1], jX.x[1], jX.x[1] } };
    AvxDoubleDouble juliaY = { __m256d{ jY.x[0], jY.x[0], jY.x[0], jY.x[0] }, __m256d{ jY.x[1], jY.x[1], jY.x[1], jY.x[1] } };

    if constexpr(parallel)
        omp_set_num_threads(omp_get_num_procs());
#pragma omp parallel for schedule(static, 1) if (parallel)
    for (long j = 0; j < info.bHeight; j++) {
        T y = viewy + T(double(j)) * hpp;
        __m256d y0s = { y.x[0], y.x[0], y.x[0], y.x[0] };
        __m256d y1s = { y.x[1], y.x[1], y.x[1], y.x[1] };
        AvxDoubleDouble ys{ y0s, y1s };
        long i = 0;
        for (; i < info.bWidth; i += 4) {
            T x1 = viewx + T(double(i)) * wpp;
            T x2 = x1 + wpp;
            T x3 = x2 + wpp;
            T x4 = x3 + wpp;

            __m256d x0s = {
                x1.x[0], x2.x[0], x3.x[0], x4.x[0],
            };
            __m256d x1s = {
                x1.x[1], x2.x[1], x3.x[1], x4.x[1],
            };

            AvxDoubleDouble xs{ x0s, x1s };

            AvxDoubleDouble cx = info.julia ? juliaX : xs;
            AvxDoubleDouble cy = info.julia ? juliaY : ys;

            int itRes[4] = { 0, 0, 0, 0 };

            __m256d threshold = { 16.0, 16.0, 16.0, 16.0 };
            __m256d counter = { 0, 0, 0, 0 };
            __m256d adder = { 1, 1, 1, 1 };

            AvxDoubleDouble a = xs;
            AvxDoubleDouble b = ys;

            // zero-initialized (like the double kernel) so the andnot/and blend
            // below never reads indeterminate values
            __m256d resultsa = { 0, 0, 0, 0 };
            __m256d resultsb = { 0, 0, 0, 0 };
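
            // Same iteration as above, but a and b are double-double values. The
            // all-lanes-escaped check runs every iteration here, since the test is
            // cheap relative to a double-double step.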
            for (int k = 0; k < info.maxIter; k++) {
                AvxDoubleDouble aa = a * a;
                AvxDoubleDouble bb = b * b;
                AvxDoubleDouble abab = a * b; abab = abab + abab;
                a = aa - bb + cx;
                b = abab + cy;
                __m256d cmp = _mm256_cmp_pd(_mm256_add_pd(aa.x[0], bb.x[0]), threshold, _CMP_LE_OQ);
                if (info.smooth) {
                    resultsa = _mm256_or_pd(_mm256_andnot_pd(cmp, resultsa), _mm256_and_pd(cmp, a.x[0]));
                    resultsb = _mm256_or_pd(_mm256_andnot_pd(cmp, resultsb), _mm256_and_pd(cmp, b.x[0]));
                }
                adder = _mm256_and_pd(adder, cmp);
                counter = _mm256_add_pd(counter, adder);
                if (_mm256_testz_si256(_mm256_castpd_si256(cmp), _mm256_castpd_si256(cmp)) != 0) {
                    break;
                }
            }
            auto alignVec = [](double* data) -> double* {
                void* aligned = data;
                ::size_t length = 64;
                std::align(32, 4 * sizeof(double), aligned, length);
                return static_cast<double*>(aligned);
            };

            double resData[8];
            double* ftRes = alignVec(resData);
            double* resa = (double*) &resultsa;
            double* resb = (double*) &resultsb;
            _mm256_store_pd(ftRes, counter);

            for (int k = 0; k < 4 && i + k < info.bWidth; k++) {
                if (info.smooth)
                    data[i + k + j * info.bWidth] = ftRes[k] <= 0 ? info.maxIter :
                        ftRes[k] >= info.maxIter ? info.maxIter :
                        ((float) ftRes[k]) + 1 - ::log(::log(resa[k] * resa[k] + resb[k] * resb[k]) / 2) / ::log(2.0f);
                else
                    data[i + k + j * info.bWidth] = ftRes[k] > 0 ? float(ftRes[k]) : info.maxIter;
            }
        }
    }
}