// CpuGeneratorsAVX.cpp
// AVX (x86) implementations of the CPU mandelbrot generators for
// float, double and double-double precision.
  1. #include "CpuGenerators.h"
  2. #include "LightDoubleDouble.h"
  3. #include <immintrin.h>
  4. #include <omp.h>
  5. #include <cmath>
  6. #include <utility>
  7. #include <memory>
using mnd::CpuGenerator;
namespace mnd
{
// Explicit instantiations of every AVX generator variant defined in this
// file: float / double / double-double precision, serial and parallel.
template class CpuGenerator<float, mnd::X86_AVX, false>;
template class CpuGenerator<float, mnd::X86_AVX, true>;
template class CpuGenerator<double, mnd::X86_AVX, false>;
template class CpuGenerator<double, mnd::X86_AVX, true>;
template class CpuGenerator<DoubleDouble, mnd::X86_AVX, false>;
template class CpuGenerator<DoubleDouble, mnd::X86_AVX, true>;
}
  18. template<bool parallel>
  19. void CpuGenerator<float, mnd::X86_AVX, parallel>::generate(const mnd::MandelInfo& info, float* data)
  20. {
  21. using T = float;
  22. const MandelViewport& view = info.view;
  23. const float dppf = float(view.width / info.bWidth);
  24. const float viewxf = float(view.x);
  25. __m256 viewx = _mm256_set1_ps(viewxf);
  26. __m256 dpp = _mm256_set1_ps(dppf);
  27. T jX = mnd::convert<T>(info.juliaX);
  28. T jY = mnd::convert<T>(info.juliaY);
  29. __m256 juliaX = { jX, jX, jX, jX, jX, jX, jX, jX };
  30. __m256 juliaY = { jY, jY, jY, jY, jY, jY, jY, jY };
  31. #if defined(_OPENMP)
  32. if constexpr(parallel)
  33. omp_set_num_threads(omp_get_num_procs());
  34. # pragma omp parallel for schedule(static, 1) if (parallel)
  35. #endif
  36. for (long j = 0; j < info.bHeight; j++) {
  37. T y = T(view.y) + T(j) * T(view.height / info.bHeight);
  38. __m256 ys = _mm256_set1_ps(y);
  39. for (long i = 0; i < info.bWidth; i += 16) {
  40. __m256 pixc = { float(i), float(i + 1), float(i + 2), float(i + 3), float(i + 4), float(i + 5), float(i + 6), float(i + 7) };
  41. __m256 pixc2 = { float(i + 8), float(i + 9), float(i + 10), float(i + 11), float(i + 12), float(i + 13), float(i + 14), float(i + 15) };
  42. __m256 xs = _mm256_add_ps(_mm256_mul_ps(dpp, pixc), viewx);
  43. __m256 xs2 = _mm256_add_ps(_mm256_mul_ps(dpp, pixc2), viewx);
  44. __m256 counter = _mm256_setzero_ps();
  45. __m256 adder = _mm256_set1_ps(1);
  46. __m256 counter2 = _mm256_setzero_ps();
  47. __m256 adder2 = _mm256_set1_ps(1);
  48. __m256 threshold = _mm256_set1_ps(16);
  49. __m256 a = xs;
  50. __m256 a2 = xs2;
  51. __m256 b = ys;
  52. __m256 b2 = ys;
  53. __m256 cx = info.julia ? juliaX : xs;
  54. __m256 cx2 = info.julia ? juliaX : xs2;
  55. __m256 cy = info.julia ? juliaY : ys;
  56. __m256 resultsa = a;
  57. __m256 resultsb = b;
  58. __m256 resultsa2 = a2;
  59. __m256 resultsb2 = b2;
  60. if (info.smooth) {
  61. __m256 cmp = _mm256_cmp_ps(a, a, _CMP_LE_OQ);
  62. __m256 cmp2 = _mm256_cmp_ps(a, a, _CMP_LE_OQ);
  63. for (int k = 0; k < info.maxIter; k++) {
  64. __m256 aa = _mm256_mul_ps(a, a);
  65. __m256 aa2 = _mm256_mul_ps(a2, a2);
  66. __m256 bb = _mm256_mul_ps(b, b);
  67. __m256 bb2 = _mm256_mul_ps(b2, b2);
  68. __m256 abab = _mm256_mul_ps(a, b); abab = _mm256_add_ps(abab, abab);
  69. __m256 abab2 = _mm256_mul_ps(a2, b2); abab2 = _mm256_add_ps(abab2, abab2);
  70. a = _mm256_add_ps(_mm256_sub_ps(aa, bb), cx);
  71. a2 = _mm256_add_ps(_mm256_sub_ps(aa2, bb2), cx2);
  72. b = _mm256_add_ps(abab, cy);
  73. b2 = _mm256_add_ps(abab2, cy);
  74. resultsa = _mm256_or_ps(_mm256_andnot_ps(cmp, resultsa), _mm256_and_ps(cmp, a));
  75. resultsb = _mm256_or_ps(_mm256_andnot_ps(cmp, resultsb), _mm256_and_ps(cmp, b));
  76. resultsa2 = _mm256_or_ps(_mm256_andnot_ps(cmp2, resultsa2), _mm256_and_ps(cmp2, a2));
  77. resultsb2 = _mm256_or_ps(_mm256_andnot_ps(cmp2, resultsb2), _mm256_and_ps(cmp2, b2));
  78. cmp = _mm256_cmp_ps(_mm256_add_ps(aa, bb), threshold, _CMP_LE_OQ);
  79. cmp2 = _mm256_cmp_ps(_mm256_add_ps(aa2, bb2), threshold, _CMP_LE_OQ);
  80. adder = _mm256_and_ps(adder, cmp);
  81. counter = _mm256_add_ps(counter, adder);
  82. adder2 = _mm256_and_ps(adder2, cmp2);
  83. counter2 = _mm256_add_ps(counter2, adder2);
  84. if ((k & 0x7) == 0 && _mm256_testz_ps(cmp, cmp) != 0 && _mm256_testz_ps(cmp2, cmp2) != 0) {
  85. break;
  86. }
  87. }
  88. }
  89. else {
  90. for (int k = 0; k < info.maxIter; k++) {
  91. __m256 aa = _mm256_mul_ps(a, a);
  92. __m256 aa2 = _mm256_mul_ps(a2, a2);
  93. __m256 bb = _mm256_mul_ps(b, b);
  94. __m256 bb2 = _mm256_mul_ps(b2, b2);
  95. __m256 abab = _mm256_mul_ps(a, b); abab = _mm256_add_ps(abab, abab);
  96. __m256 abab2 = _mm256_mul_ps(a2, b2); abab2 = _mm256_add_ps(abab2, abab2);
  97. a = _mm256_add_ps(_mm256_sub_ps(aa, bb), cx);
  98. a2 = _mm256_add_ps(_mm256_sub_ps(aa2, bb2), cx2);
  99. b = _mm256_add_ps(abab, cy);
  100. b2 = _mm256_add_ps(abab2, cy);
  101. __m256 cmp = _mm256_cmp_ps(_mm256_add_ps(aa, bb), threshold, _CMP_LE_OQ);
  102. __m256 cmp2 = _mm256_cmp_ps(_mm256_add_ps(aa2, bb2), threshold, _CMP_LE_OQ);
  103. adder = _mm256_and_ps(adder, cmp);
  104. counter = _mm256_add_ps(counter, adder);
  105. adder2 = _mm256_and_ps(adder2, cmp2);
  106. counter2 = _mm256_add_ps(counter2, adder2);
  107. if ((k & 0x7) == 0 && _mm256_testz_ps(cmp, cmp) != 0 && _mm256_testz_ps(cmp2, cmp2) != 0) {
  108. break;
  109. }
  110. }
  111. }
  112. auto alignVec = [](float* data) -> float* {
  113. void* aligned = data;
  114. ::size_t length = 64;
  115. std::align(32, 8 * sizeof(float), aligned, length);
  116. return static_cast<float*>(aligned);
  117. };
  118. float resData[64];
  119. float* ftRes = alignVec(resData);
  120. float* resa = ftRes + 16;
  121. float* resb = resa + 16;
  122. _mm256_store_ps(ftRes, counter);
  123. _mm256_store_ps(ftRes + 8, counter2);
  124. _mm256_store_ps(resa, resultsa);
  125. _mm256_store_ps(resa + 8, resultsa2);
  126. _mm256_store_ps(resb, resultsb);
  127. _mm256_store_ps(resb + 8, resultsb2);
  128. for (int k = 0; k < 16 && i + k < info.bWidth; k++) {
  129. if (info.smooth) {
  130. data[i + k + j * info.bWidth] = ftRes[k] < 0 ? info.maxIter :
  131. ftRes[k] >= info.maxIter ? info.maxIter :
  132. ((float)ftRes[k]) + 1 - ::log(::log(resa[k] * resa[k] + resb[k] * resb[k]) / 2) / ::log(2.0f);
  133. }
  134. else {
  135. data[i + k + j * info.bWidth] = ftRes[k] < 0 ? info.maxIter : ftRes[k];
  136. }
  137. }
  138. }
  139. }
  140. }
  141. template<bool parallel>
  142. void CpuGenerator<double, mnd::X86_AVX, parallel>::generate(const mnd::MandelInfo& info, float* data)
  143. {
  144. using T = double;
  145. const MandelViewport& view = info.view;
  146. const double dppf = double(view.width / info.bWidth);
  147. const double viewxf = double(view.x);
  148. __m256d viewx = { viewxf, viewxf, viewxf, viewxf };
  149. __m256d dpp = { dppf, dppf, dppf, dppf };
  150. T jX = mnd::convert<T>(info.juliaX);
  151. T jY = mnd::convert<T>(info.juliaY);
  152. __m256d juliaX = { jX, jX, jX, jX };
  153. __m256d juliaY = { jY, jY, jY, jY };
  154. #if defined(_OPENMP)
  155. if constexpr(parallel)
  156. omp_set_num_threads(omp_get_num_procs());
  157. # pragma omp parallel for schedule(static, 1) if (parallel)
  158. #endif
  159. for (long j = 0; j < info.bHeight; j++) {
  160. T y = T(view.y + T(j) * view.height / info.bHeight);
  161. __m256d ys = { y, y, y, y };
  162. for (long i = 0; i < info.bWidth; i += 8) {
  163. __m256d pixc = { double(i), double(i + 1), double(i + 2), double(i + 3) };
  164. __m256d pixc2 = { double(i + 4), double(i + 5), double(i + 6), double(i + 7) };
  165. __m256d xs = _mm256_add_pd(_mm256_mul_pd(dpp, pixc), viewx);
  166. __m256d xs2 = _mm256_add_pd(_mm256_mul_pd(dpp, pixc2), viewx);
  167. int itRes[4] = { 0, 0, 0, 0 };
  168. __m256d threshold = { 16.0, 16.0, 16.0, 16.0 };
  169. __m256d counter = { 0, 0, 0, 0 };
  170. __m256d adder = { 1, 1, 1, 1 };
  171. __m256d counter2 = { 0, 0, 0, 0 };
  172. __m256d adder2 = { 1, 1, 1, 1 };
  173. __m256d resultsa = { 0, 0, 0, 0 };
  174. __m256d resultsb = { 0, 0, 0, 0 };
  175. __m256d resultsa2 = { 0, 0, 0, 0 };
  176. __m256d resultsb2 = { 0, 0, 0, 0 };
  177. __m256d a = xs;
  178. __m256d b = ys;
  179. __m256d a2 = xs2;
  180. __m256d b2 = ys;
  181. __m256d cx = info.julia ? juliaX : xs;
  182. __m256d cx2 = info.julia ? juliaX : xs2;
  183. __m256d cy = info.julia ? juliaY : ys;
  184. if (info.smooth) {
  185. __m256d cmp = _mm256_cmp_pd(a, a, _CMP_LE_OQ);
  186. __m256d cmp2 = _mm256_cmp_pd(a, a, _CMP_LE_OQ);
  187. for (int k = 0; k < info.maxIter; k++) {
  188. __m256d aa = _mm256_mul_pd(a, a);
  189. __m256d aa2 = _mm256_mul_pd(a2, a2);
  190. __m256d bb = _mm256_mul_pd(b, b);
  191. __m256d bb2 = _mm256_mul_pd(b2, b2);
  192. __m256d abab = _mm256_mul_pd(a, b); abab = _mm256_add_pd(abab, abab);
  193. __m256d abab2 = _mm256_mul_pd(a2, b2); abab2 = _mm256_add_pd(abab2, abab2);
  194. a = _mm256_add_pd(_mm256_sub_pd(aa, bb), cx);
  195. a2 = _mm256_add_pd(_mm256_sub_pd(aa2, bb2), cx2);
  196. b = _mm256_add_pd(abab, cy);
  197. b2 = _mm256_add_pd(abab2, cy);
  198. resultsa = _mm256_or_pd(_mm256_andnot_pd(cmp, resultsa), _mm256_and_pd(cmp, a));
  199. resultsb = _mm256_or_pd(_mm256_andnot_pd(cmp, resultsb), _mm256_and_pd(cmp, b));
  200. resultsa2 = _mm256_or_pd(_mm256_andnot_pd(cmp2, resultsa2), _mm256_and_pd(cmp2, a2));
  201. resultsb2 = _mm256_or_pd(_mm256_andnot_pd(cmp2, resultsb2), _mm256_and_pd(cmp2, b2));
  202. cmp = _mm256_cmp_pd(_mm256_add_pd(aa, bb), threshold, _CMP_LE_OQ);
  203. cmp2 = _mm256_cmp_pd(_mm256_add_pd(aa2, bb2), threshold, _CMP_LE_OQ);
  204. adder = _mm256_and_pd(adder, cmp);
  205. counter = _mm256_add_pd(counter, adder);
  206. adder2 = _mm256_and_pd(adder2, cmp2);
  207. counter2 = _mm256_add_pd(counter2, adder2);
  208. if ((k & 0x7) == 0 && _mm256_testz_si256(_mm256_castpd_si256(cmp), _mm256_castpd_si256(cmp)) != 0 &&
  209. _mm256_testz_si256(_mm256_castpd_si256(cmp2), _mm256_castpd_si256(cmp2)) != 0) {
  210. break;
  211. }
  212. }
  213. }
  214. else {
  215. for (int k = 0; k < info.maxIter; k++) {
  216. __m256d aa = _mm256_mul_pd(a, a);
  217. __m256d aa2 = _mm256_mul_pd(a2, a2);
  218. __m256d bb = _mm256_mul_pd(b, b);
  219. __m256d bb2 = _mm256_mul_pd(b2, b2);
  220. __m256d abab = _mm256_mul_pd(a, b); abab = _mm256_add_pd(abab, abab);
  221. __m256d abab2 = _mm256_mul_pd(a2, b2); abab2 = _mm256_add_pd(abab2, abab2);
  222. a = _mm256_add_pd(_mm256_sub_pd(aa, bb), cx);
  223. a2 = _mm256_add_pd(_mm256_sub_pd(aa2, bb2), cx2);
  224. b = _mm256_add_pd(abab, cy);
  225. b2 = _mm256_add_pd(abab2, cy);
  226. __m256d cmp = _mm256_cmp_pd(_mm256_add_pd(aa, bb), threshold, _CMP_LE_OQ);
  227. __m256d cmp2 = _mm256_cmp_pd(_mm256_add_pd(aa2, bb2), threshold, _CMP_LE_OQ);
  228. adder = _mm256_and_pd(adder, cmp);
  229. counter = _mm256_add_pd(counter, adder);
  230. adder2 = _mm256_and_pd(adder2, cmp2);
  231. counter2 = _mm256_add_pd(counter2, adder2);
  232. if ((k & 0x7) == 0 && _mm256_testz_si256(_mm256_castpd_si256(cmp), _mm256_castpd_si256(cmp)) != 0 &&
  233. _mm256_testz_si256(_mm256_castpd_si256(cmp2), _mm256_castpd_si256(cmp2)) != 0) {
  234. break;
  235. }
  236. }
  237. }
  238. auto alignVec = [](double* data) -> double* {
  239. void* aligned = data;
  240. ::size_t length = 64;
  241. std::align(32, 4 * sizeof(double), aligned, length);
  242. return static_cast<double*>(aligned);
  243. };
  244. double resData[8];
  245. double* ftRes = alignVec(resData);
  246. double* resa = (double*) &resultsa;
  247. double* resb = (double*) &resultsb;
  248. _mm256_store_pd(ftRes, counter);
  249. for (int k = 0; k < 4 && i + k < info.bWidth; k++) {
  250. if (info.smooth)
  251. data[i + k + j * info.bWidth] = ftRes[k] < 0 ? float(info.maxIter) :
  252. ftRes[k] >= info.maxIter ? float(info.maxIter) :
  253. float(((float)ftRes[k]) + 1 - ::log(::log(resa[k] * resa[k] + resb[k] * resb[k]) / 2) / ::log(2.0f));
  254. else
  255. data[i + k + j * info.bWidth] = ftRes[k] >= 0 ? float(ftRes[k]) : info.maxIter;
  256. }
  257. resa = (double*) &resultsa2;
  258. resb = (double*) &resultsb2;
  259. _mm256_store_pd(ftRes, counter2);
  260. i += 4;
  261. for (int k = 0; k < 4 && i + k < info.bWidth; k++) {
  262. if (info.smooth)
  263. data[i + k + j * info.bWidth] = ftRes[k] < 0 ? float(info.maxIter) :
  264. ftRes[k] >= info.maxIter ? float(info.maxIter) :
  265. float(((float)ftRes[k]) + 1 - ::log(::log(resa[k] * resa[k] + resb[k] * resb[k]) / 2) / ::log(2.0f));
  266. else
  267. data[i + k + j * info.bWidth] = ftRes[k] >= 0 ? float(ftRes[k]) : info.maxIter;
  268. }
  269. i -= 4;
  270. }
  271. }
  272. }
// A pair of AVX vectors; used by the error-free transformations below to
// return a (rounded result, rounding error) pair.
struct VecPair
{
__m256d a;
__m256d b;
};
  278. static inline VecPair quickTwoSum(__m256d a, __m256d b)
  279. {
  280. __m256d s = _mm256_add_pd(a, b);
  281. __m256d e = _mm256_sub_pd(b, _mm256_sub_pd(s, a));
  282. return { s, e };
  283. }
  284. static inline VecPair quickTwoDiff(__m256d a, __m256d b)
  285. {
  286. __m256d s = _mm256_sub_pd(a, b);
  287. __m256d e = _mm256_sub_pd(_mm256_sub_pd(a, s), b);
  288. return { s, e };
  289. }
  290. static inline VecPair twoSum(__m256d a, __m256d b)
  291. {
  292. __m256d s = _mm256_add_pd(a, b);
  293. __m256d bb = _mm256_sub_pd(s, a);
  294. __m256d e = _mm256_add_pd(_mm256_sub_pd(a, _mm256_sub_pd(s, bb)), _mm256_sub_pd(b, bb));
  295. return { s, e };
  296. }
  297. static inline VecPair twoDiff(__m256d a, __m256d b)
  298. {
  299. __m256d s = _mm256_sub_pd(a, b);
  300. __m256d bb = _mm256_sub_pd(s, a);
  301. __m256d e = _mm256_sub_pd(_mm256_sub_pd(a, _mm256_sub_pd(s, bb)), _mm256_add_pd(b, bb));
  302. return { s, e };
  303. }
  304. static inline VecPair split(__m256d a)
  305. {
  306. /*
  307. // -- this should never happen when doing mandelbrot calculations,
  308. // so we omit this check.
  309. if (a > _QD_SPLIT_THRESH || a < -_QD_SPLIT_THRESH) {
  310. a *= 3.7252902984619140625e-09; // 2^-28
  311. temp = _QD_SPLITTER * a;
  312. hi = temp - (temp - a);
  313. lo = a - hi;
  314. hi *= 268435456.0; // 2^28
  315. lo *= 268435456.0; // 2^28
  316. } else {
  317. temp = _QD_SPLITTER * a;
  318. hi = temp - (temp - a);
  319. lo = a - hi;
  320. }
  321. */
  322. static const __m256d SPLITTER = { 134217729.0, 134217729.0, 134217729.0, 134217729.0 };
  323. __m256d temp = _mm256_mul_pd(SPLITTER, a);
  324. __m256d hi = _mm256_sub_pd(temp, _mm256_sub_pd(temp, a));
  325. __m256d lo = _mm256_sub_pd(a, hi);
  326. return { hi, lo };
  327. }
  328. static inline VecPair twoProd(__m256d a, __m256d b)
  329. {
  330. __m256d p = _mm256_mul_pd(a, b);
  331. auto[a_hi, a_lo] = split(a);
  332. auto[b_hi, b_lo] = split(b);
  333. __m256d err = _mm256_add_pd(_mm256_add_pd(_mm256_sub_pd(_mm256_mul_pd(a_hi, b_hi), p), _mm256_add_pd(_mm256_mul_pd(a_hi, b_lo), _mm256_mul_pd(a_lo, b_hi))), _mm256_mul_pd(a_lo, b_lo));
  334. return { p, err };
  335. }
  336. struct AvxDoubleDouble
  337. {
  338. __m256d x[2];
  339. inline AvxDoubleDouble(__m256d a, __m256d b) :
  340. x{ a, b }
  341. {}
  342. inline AvxDoubleDouble(double a, double b) :
  343. x{ _mm256_set1_pd(a), _mm256_set1_pd(b) }
  344. {}
  345. inline AvxDoubleDouble operator + (const AvxDoubleDouble& sm) const
  346. {
  347. auto[s, e] = twoSum(x[0], sm.x[0]);
  348. e = _mm256_add_pd(e, _mm256_add_pd(x[1], sm.x[1]));
  349. auto[r1, r2] = quickTwoSum(s, e);
  350. return AvxDoubleDouble{ r1, r2 };
  351. }
  352. inline AvxDoubleDouble operator - (const AvxDoubleDouble& sm) const
  353. {
  354. auto[s, e] = twoDiff(x[0], sm.x[0]);
  355. e = _mm256_add_pd(e, x[1]);
  356. e = _mm256_sub_pd(e, sm.x[1]);
  357. auto[r1, r2] = quickTwoSum(s, e);
  358. return AvxDoubleDouble{ r1, r2 };
  359. }
  360. inline AvxDoubleDouble operator * (const AvxDoubleDouble& sm) const
  361. {
  362. auto[p1, p2] = twoProd(this->x[0], sm.x[0]);
  363. p2 = _mm256_add_pd(p2,
  364. _mm256_add_pd(_mm256_mul_pd(sm.x[1], x[0]), _mm256_mul_pd(sm.x[0], x[1])) );
  365. auto[r1, r2] = quickTwoSum(p1, p2);
  366. return AvxDoubleDouble{ r1, r2 };
  367. }
  368. };
  369. template<bool parallel>
  370. void CpuGenerator<mnd::DoubleDouble, mnd::X86_AVX, parallel>::generate(const mnd::MandelInfo& info, float* data)
  371. {
  372. const MandelViewport& view = info.view;
  373. using T = LightDoubleDouble;
  374. T viewx = mnd::convert<T>(view.x);
  375. T viewy = mnd::convert<T>(view.y);
  376. T wpp = mnd::convert<T>(view.width / info.bWidth);
  377. T hpp = mnd::convert<T>(view.height / info.bHeight);
  378. T jX = mnd::convert<T>(info.juliaX);
  379. T jY = mnd::convert<T>(info.juliaY);
  380. AvxDoubleDouble juliaX = { jX[0], jX[1] };
  381. AvxDoubleDouble juliaY = { jY[0], jY[1] };
  382. #if defined(_OPENMP)
  383. if constexpr(parallel)
  384. omp_set_num_threads(omp_get_num_procs());
  385. # pragma omp parallel for schedule(static, 1) if (parallel)
  386. #endif
  387. for (long j = 0; j < info.bHeight; j++) {
  388. T y = viewy + T(double(j)) * hpp;
  389. AvxDoubleDouble ys{ y[0], y[1] };
  390. for (long i = 0; i < info.bWidth; i += 4) {
  391. T x1 = viewx + T(double(i)) * wpp;
  392. T x2 = x1 + wpp;
  393. T x3 = x2 + wpp;
  394. T x4 = x3 + wpp;
  395. __m256d x0s = {
  396. x1[0], x2[0], x3[0], x4[0],
  397. };
  398. __m256d x1s = {
  399. x1[1], x2[1], x3[1], x4[1],
  400. };
  401. AvxDoubleDouble xs{ x0s, x1s };
  402. AvxDoubleDouble cx = info.julia ? juliaX : xs;
  403. AvxDoubleDouble cy = info.julia ? juliaY : ys;
  404. int itRes[4] = { 0, 0, 0, 0 };
  405. __m256d threshold = { 16.0, 16.0, 16.0, 16.0 };
  406. __m256d counter = { 0, 0, 0, 0 };
  407. __m256d adder = { 1, 1, 1, 1 };
  408. AvxDoubleDouble a = xs;
  409. AvxDoubleDouble b = ys;
  410. __m256d resultsa = _mm256_set1_pd(0);
  411. __m256d resultsb = _mm256_set1_pd(0);
  412. __m256d cmp = _mm256_cmp_pd(threshold, threshold, _CMP_LE_OQ);
  413. for (int k = 0; k < info.maxIter; k++) {
  414. AvxDoubleDouble aa = a * a;
  415. AvxDoubleDouble bb = b * b;
  416. AvxDoubleDouble abab = a * b; abab = abab + abab;
  417. a = aa - bb + cx;
  418. b = abab + cy;
  419. if (info.smooth) {
  420. resultsa = _mm256_or_pd(_mm256_andnot_pd(cmp, resultsa), _mm256_and_pd(cmp, a.x[0]));
  421. resultsb = _mm256_or_pd(_mm256_andnot_pd(cmp, resultsb), _mm256_and_pd(cmp, b.x[0]));
  422. }
  423. cmp = _mm256_cmp_pd(_mm256_add_pd(aa.x[0], bb.x[0]), threshold, _CMP_LE_OQ);
  424. adder = _mm256_and_pd(adder, cmp);
  425. counter = _mm256_add_pd(counter, adder);
  426. if (_mm256_testz_si256(_mm256_castpd_si256(cmp), _mm256_castpd_si256(cmp)) != 0) {
  427. break;
  428. }
  429. }
  430. auto alignVec = [](double* data) -> double* {
  431. void* aligned = data;
  432. ::size_t length = 64;
  433. std::align(32, 4 * sizeof(double), aligned, length);
  434. return static_cast<double*>(aligned);
  435. };
  436. double resData[8];
  437. double* ftRes = alignVec(resData);
  438. double* resa = (double*) &resultsa;
  439. double* resb = (double*) &resultsb;
  440. _mm256_store_pd(ftRes, counter);
  441. for (int k = 0; k < 4 && i + k < info.bWidth; k++) {
  442. if (info.smooth)
  443. data[i + k + j * info.bWidth] = float(ftRes[k] < 0 ? info.maxIter :
  444. ftRes[k] >= info.maxIter ? info.maxIter :
  445. ((float)ftRes[k]) + 1 - ::log(::log(float(resa[k] * resa[k] + resb[k] * resb[k])) / 2) / ::log(2.0f));
  446. else
  447. data[i + k + j * info.bWidth] = ftRes[k] >= 0 ? float(ftRes[k]) : info.maxIter;
  448. }
  449. }
  450. }
  451. }