CpuGeneratorsAVXFMA.cpp

#include "CpuGenerators.h"

#include <immintrin.h>
#include <omp.h>

#include <cmath>
#include <utility>
#include <memory>
#include <tuple> // std::tuple is used by the quad-double coordinate splitting below

using mnd::CpuGenerator;

namespace mnd
{
    template class CpuGenerator<float, mnd::X86_AVX_FMA, false>;
    template class CpuGenerator<float, mnd::X86_AVX_FMA, true>;
    template class CpuGenerator<double, mnd::X86_AVX_FMA, false>;
    template class CpuGenerator<double, mnd::X86_AVX_FMA, true>;
    template class CpuGenerator<DoubleDouble, mnd::X86_AVX_FMA, false>;
    template class CpuGenerator<DoubleDouble, mnd::X86_AVX_FMA, true>;
    template class CpuGenerator<QuadDouble, mnd::X86_AVX_FMA, false>;
    template class CpuGenerator<QuadDouble, mnd::X86_AVX_FMA, true>;
}
template<bool parallel>
void CpuGenerator<float, mnd::X86_AVX_FMA, parallel>::generate(const mnd::MandelInfo& info, float* data)
{
    using T = float;
    const MandelViewport& view = info.view;

    const float dppf = float(view.width / info.bWidth);
    const float viewxf = float(view.x);
    __m256 viewx = { viewxf, viewxf, viewxf, viewxf, viewxf, viewxf, viewxf, viewxf };
    __m256 dpp = { dppf, dppf, dppf, dppf, dppf, dppf, dppf, dppf };

    T jX = mnd::convert<T>(info.juliaX);
    T jY = mnd::convert<T>(info.juliaY);
    __m256 juliaX = { jX, jX, jX, jX, jX, jX, jX, jX };
    __m256 juliaY = { jY, jY, jY, jY, jY, jY, jY, jY };

#if defined(_OPENMP)
    if constexpr(parallel)
        omp_set_num_threads(omp_get_num_procs());
#   pragma omp parallel for schedule(static, 1) if (parallel)
#endif
    for (long j = 0; j < info.bHeight; j++) {
        T y = T(view.y) + T(j) * T(view.height / info.bHeight);
        __m256 ys = { y, y, y, y, y, y, y, y };
        // 24 pixels per iteration: three independent vectors of 8 floats each
        for (long i = 0; i < info.bWidth; i += 24) {
            __m256 pixc = { float(i), float(i + 1), float(i + 2), float(i + 3), float(i + 4), float(i + 5), float(i + 6), float(i + 7) };
            __m256 pixc2 = { float(i + 8), float(i + 9), float(i + 10), float(i + 11), float(i + 12), float(i + 13), float(i + 14), float(i + 15) };
            __m256 pixc3 = { float(i + 16), float(i + 17), float(i + 18), float(i + 19), float(i + 20), float(i + 21), float(i + 22), float(i + 23) };

            __m256 xs = _mm256_add_ps(_mm256_mul_ps(dpp, pixc), viewx);
            __m256 xs2 = _mm256_add_ps(_mm256_mul_ps(dpp, pixc2), viewx);
            __m256 xs3 = _mm256_add_ps(_mm256_mul_ps(dpp, pixc3), viewx);

            // counter accumulates the per-lane iteration count; adder lanes are
            // zeroed once the corresponding point has escaped
            __m256 counter = { 0, 0, 0, 0, 0, 0, 0, 0 };
            __m256 adder = { 1, 1, 1, 1, 1, 1, 1, 1 };
            __m256 resultsa = { 0, 0, 0, 0, 0, 0, 0, 0 };
            __m256 resultsb = { 0, 0, 0, 0, 0, 0, 0, 0 };
            __m256 counter2 = { 0, 0, 0, 0, 0, 0, 0, 0 };
            __m256 adder2 = { 1, 1, 1, 1, 1, 1, 1, 1 };
            __m256 resultsa2 = { 0, 0, 0, 0, 0, 0, 0, 0 };
            __m256 resultsb2 = { 0, 0, 0, 0, 0, 0, 0, 0 };
            __m256 counter3 = { 0, 0, 0, 0, 0, 0, 0, 0 };
            __m256 adder3 = { 1, 1, 1, 1, 1, 1, 1, 1 };
            __m256 resultsa3 = { 0, 0, 0, 0, 0, 0, 0, 0 };
            __m256 resultsb3 = { 0, 0, 0, 0, 0, 0, 0, 0 };

            __m256 threshold = { 16.0f, 16.0f, 16.0f, 16.0f, 16.0f, 16.0f, 16.0f, 16.0f };
            __m256 two = { 2, 2, 2, 2, 2, 2, 2, 2 };

            __m256 a = xs;
            __m256 a2 = xs2;
            __m256 a3 = xs3;
            __m256 b = ys;
            __m256 b2 = ys;
            __m256 b3 = ys;

            __m256 cx = info.julia ? juliaX : xs;
            __m256 cx2 = info.julia ? juliaX : xs2;
            __m256 cx3 = info.julia ? juliaX : xs3;
            __m256 cy = info.julia ? juliaY : ys;

            if (info.smooth) {
                __m256 cmp = _mm256_cmp_ps(threshold, threshold, _CMP_LE_OQ);
                __m256 cmp2 = _mm256_cmp_ps(threshold, threshold, _CMP_LE_OQ);
                __m256 cmp3 = _mm256_cmp_ps(threshold, threshold, _CMP_LE_OQ);
                for (int k = 0; k < info.maxIter; k++) {
                    __m256 bb = _mm256_mul_ps(b, b);
                    __m256 bb2 = _mm256_mul_ps(b2, b2);
                    __m256 bb3 = _mm256_mul_ps(b3, b3);
                    __m256 ab = _mm256_mul_ps(a, b);
                    __m256 ab2 = _mm256_mul_ps(a2, b2);
                    __m256 ab3 = _mm256_mul_ps(a3, b3);
                    __m256 olda = a;
                    __m256 olda2 = a2;
                    __m256 olda3 = a3;
                    // z := z^2 + c, split into real part a and imaginary part b
                    a = _mm256_add_ps(_mm256_fmsub_ps(a, a, bb), cx);
                    a2 = _mm256_add_ps(_mm256_fmsub_ps(a2, a2, bb2), cx2);
                    a3 = _mm256_add_ps(_mm256_fmsub_ps(a3, a3, bb3), cx3);
                    b = _mm256_fmadd_ps(two, ab, cy);
                    b2 = _mm256_fmadd_ps(two, ab2, cy);
                    b3 = _mm256_fmadd_ps(two, ab3, cy);
                    /*resultsa = _mm256_or_ps(_mm256_andnot_ps(cmp, resultsa), _mm256_and_ps(cmp, a));
                    resultsb = _mm256_or_ps(_mm256_andnot_ps(cmp, resultsb), _mm256_and_ps(cmp, b));
                    resultsa2 = _mm256_or_ps(_mm256_andnot_ps(cmp2, resultsa2), _mm256_and_ps(cmp2, a2));
                    resultsb2 = _mm256_or_ps(_mm256_andnot_ps(cmp2, resultsb2), _mm256_and_ps(cmp2, b2));
                    resultsa3 = _mm256_or_ps(_mm256_andnot_ps(cmp3, resultsa3), _mm256_and_ps(cmp3, a3));
                    resultsb3 = _mm256_or_ps(_mm256_andnot_ps(cmp3, resultsb3), _mm256_and_ps(cmp3, b3));*/
                    resultsa = _mm256_blendv_ps(resultsa, a, cmp);
                    resultsb = _mm256_blendv_ps(resultsb, b, cmp);
                    resultsa2 = _mm256_blendv_ps(resultsa2, a2, cmp2);
                    resultsb2 = _mm256_blendv_ps(resultsb2, b2, cmp2);
                    resultsa3 = _mm256_blendv_ps(resultsa3, a3, cmp3);
                    resultsb3 = _mm256_blendv_ps(resultsb3, b3, cmp3);
                    cmp = _mm256_cmp_ps(_mm256_fmadd_ps(olda, olda, bb), threshold, _CMP_LE_OQ);
                    cmp2 = _mm256_cmp_ps(_mm256_fmadd_ps(olda2, olda2, bb2), threshold, _CMP_LE_OQ);
                    cmp3 = _mm256_cmp_ps(_mm256_fmadd_ps(olda3, olda3, bb3), threshold, _CMP_LE_OQ);
                    adder = _mm256_and_ps(adder, cmp);
                    counter = _mm256_add_ps(counter, adder);
                    adder2 = _mm256_and_ps(adder2, cmp2);
                    counter2 = _mm256_add_ps(counter2, adder2);
                    adder3 = _mm256_and_ps(adder3, cmp3);
                    counter3 = _mm256_add_ps(counter3, adder3);
                    // only test for a full escape every 8th iteration
                    if ((k & 0x7) == 0 && _mm256_testz_ps(cmp, cmp) != 0 && _mm256_testz_ps(cmp2, cmp2) != 0 && _mm256_testz_ps(cmp3, cmp3) != 0) {
                        break;
                    }
                }
            }
            else {
                for (int k = 0; k < info.maxIter; k++) {
                    __m256 bb = _mm256_mul_ps(b, b);
                    __m256 bb2 = _mm256_mul_ps(b2, b2);
                    __m256 bb3 = _mm256_mul_ps(b3, b3);
                    __m256 ab = _mm256_mul_ps(a, b);
                    __m256 ab2 = _mm256_mul_ps(a2, b2);
                    __m256 ab3 = _mm256_mul_ps(a3, b3);
                    __m256 cmp = _mm256_cmp_ps(_mm256_fmadd_ps(a, a, bb), threshold, _CMP_LE_OQ);
                    __m256 cmp2 = _mm256_cmp_ps(_mm256_fmadd_ps(a2, a2, bb2), threshold, _CMP_LE_OQ);
                    __m256 cmp3 = _mm256_cmp_ps(_mm256_fmadd_ps(a3, a3, bb3), threshold, _CMP_LE_OQ);
                    a = _mm256_add_ps(_mm256_fmsub_ps(a, a, bb), cx);
                    a2 = _mm256_add_ps(_mm256_fmsub_ps(a2, a2, bb2), cx2);
                    a3 = _mm256_add_ps(_mm256_fmsub_ps(a3, a3, bb3), cx3);
                    b = _mm256_fmadd_ps(two, ab, cy);
                    b2 = _mm256_fmadd_ps(two, ab2, cy);
                    b3 = _mm256_fmadd_ps(two, ab3, cy);
                    adder = _mm256_and_ps(adder, cmp);
                    counter = _mm256_add_ps(counter, adder);
                    adder2 = _mm256_and_ps(adder2, cmp2);
                    counter2 = _mm256_add_ps(counter2, adder2);
                    adder3 = _mm256_and_ps(adder3, cmp3);
                    counter3 = _mm256_add_ps(counter3, adder3);
                    if ((k & 0x7) == 0 && _mm256_testz_ps(cmp, cmp) != 0 && _mm256_testz_ps(cmp2, cmp2) != 0 && _mm256_testz_ps(cmp3, cmp3) != 0) {
                        break;
                    }
                }
            }

            auto alignVec = [](float* data) -> float* {
                void* aligned = data;
                ::size_t length = 64;
                std::align(32, 8 * sizeof(float), aligned, length);
                return static_cast<float*>(aligned);
            };

            float resData[96];
            float* ftRes = alignVec(resData);
            float* resa = ftRes + 24;
            float* resb = resa + 24;

            _mm256_store_ps(ftRes, counter);
            _mm256_store_ps(ftRes + 8, counter2);
            _mm256_store_ps(ftRes + 16, counter3);
            _mm256_store_ps(resa, resultsa);
            _mm256_store_ps(resa + 8, resultsa2);
            _mm256_store_ps(resa + 16, resultsa3);
            _mm256_store_ps(resb, resultsb);
            _mm256_store_ps(resb + 8, resultsb2);
            _mm256_store_ps(resb + 16, resultsb3);

            for (int k = 0; k < 24 && i + k < info.bWidth; k++) {
                if (info.smooth) {
                    data[i + k + j * info.bWidth] = ftRes[k] < 0 ? info.maxIter :
                        ftRes[k] >= info.maxIter ? info.maxIter :
                        ((float) ftRes[k]) + 1 - ::log(::log(resa[k] * resa[k] + resb[k] * resb[k]) / 2) / ::log(2.0f);
                }
                else {
                    data[i + k + j * info.bWidth] = ftRes[k] < 0 ? info.maxIter : ftRes[k];
                }
            }
        }
    }
}
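
// Illustrative sketch, not part of the original generators: a minimal scalar
// model of what a single SIMD lane of the loop above computes, assuming the
// same escape radius (threshold 16 = 4^2) and the same smooth-coloring formula
// used in the write-back loop (n + 1 - log(log(|z|^2) / 2) / log 2). The helper
// name is invented for illustration, and the bookkeeping is not bit-identical
// to the vectorized counter/blend logic.
inline float referenceLaneIteration(float cx, float cy, int maxIter, bool smooth)
{
    float a = cx, b = cy;            // z starts at c (Mandelbrot case, info.julia == false)
    for (int k = 0; k < maxIter; k++) {
        float aa = a * a, bb = b * b, ab = a * b;
        if (aa + bb > 16.0f) {       // this lane has escaped
            if (!smooth)
                return float(k);
            return float(k) + 1 - ::log(::log(aa + bb) / 2) / ::log(2.0f);
        }
        a = aa - bb + cx;            // fmsub(a, a, bb) + cx in the vector code
        b = 2 * ab + cy;             // fmadd(two, ab, cy) in the vector code
    }
    return float(maxIter);           // never escaped within maxIter iterations
}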
template<bool parallel>
void CpuGenerator<double, mnd::X86_AVX_FMA, parallel>::generate(const mnd::MandelInfo& info, float* data)
{
    using T = double;
    const MandelViewport& view = info.view;

    const double dppf = double(view.width / info.bWidth);
    const double viewxf = double(view.x);
    __m256d viewx = { viewxf, viewxf, viewxf, viewxf };
    __m256d dpp = { dppf, dppf, dppf, dppf };

    T jX = mnd::convert<T>(info.juliaX);
    T jY = mnd::convert<T>(info.juliaY);
    __m256d juliaX = { jX, jX, jX, jX };
    __m256d juliaY = { jY, jY, jY, jY };

#if defined(_OPENMP)
    if constexpr(parallel)
        omp_set_num_threads(omp_get_num_procs());
#   pragma omp parallel for schedule(static, 1) if (parallel)
#endif
    for (long j = 0; j < info.bHeight; j++) {
        T y = T(view.y + T(j) * view.height / info.bHeight);
        __m256d ys = { y, y, y, y };
        // 8 pixels per iteration: two independent vectors of 4 doubles each
        for (long i = 0; i < info.bWidth; i += 8) {
            __m256d pixc = { double(i), double(i + 1), double(i + 2), double(i + 3) };
            __m256d pixc2 = { double(i + 4), double(i + 5), double(i + 6), double(i + 7) };
            __m256d xs = _mm256_fmadd_pd(dpp, pixc, viewx);
            __m256d xs2 = _mm256_fmadd_pd(dpp, pixc2, viewx);

            int itRes[4] = { 0, 0, 0, 0 };

            __m256d threshold = { 16.0, 16.0, 16.0, 16.0 };
            __m256d counter = { 0, 0, 0, 0 };
            __m256d adder = { 1, 1, 1, 1 };
            __m256d counter2 = { 0, 0, 0, 0 };
            __m256d adder2 = { 1, 1, 1, 1 };
            __m256d two = { 2, 2, 2, 2 };

            __m256d resultsa = { 0, 0, 0, 0 };
            __m256d resultsb = { 0, 0, 0, 0 };
            __m256d resultsa2 = { 0, 0, 0, 0 };
            __m256d resultsb2 = { 0, 0, 0, 0 };

            __m256d a = xs;
            __m256d b = ys;
            __m256d a2 = xs2;
            __m256d b2 = ys;

            __m256d cx = info.julia ? juliaX : xs;
            __m256d cy = info.julia ? juliaY : ys;
            __m256d cx2 = info.julia ? juliaX : xs2;
            //__m256d cy2 = info.julia ? juliaY : ys;

            __m256d cmp = _mm256_cmp_pd(threshold, threshold, _CMP_LE_OQ);
            __m256d cmp2 = _mm256_cmp_pd(threshold, threshold, _CMP_LE_OQ);
            for (int k = 0; k < info.maxIter; k++) {
                __m256d aa = _mm256_mul_pd(a, a);
                __m256d ab = _mm256_mul_pd(a, b);
                __m256d bb = _mm256_mul_pd(b, b);
                __m256d aa2 = _mm256_mul_pd(a2, a2);
                __m256d ab2 = _mm256_mul_pd(a2, b2);
                __m256d bb2 = _mm256_mul_pd(b2, b2);
                a = _mm256_fmsub_pd(a, a, bb);
                a = _mm256_add_pd(a, cx);
                a2 = _mm256_fmsub_pd(a2, a2, bb2);
                a2 = _mm256_add_pd(a2, cx2);
                b = _mm256_fmadd_pd(two, ab, cy);
                b2 = _mm256_fmadd_pd(two, ab2, cy);
                if (info.smooth) {
                    resultsa = _mm256_blendv_pd(resultsa, a, cmp);
                    resultsb = _mm256_blendv_pd(resultsb, b, cmp);
                    resultsa2 = _mm256_blendv_pd(resultsa2, a2, cmp2);
                    resultsb2 = _mm256_blendv_pd(resultsb2, b2, cmp2);
                }
                cmp = _mm256_cmp_pd(_mm256_add_pd(aa, bb), threshold, _CMP_LE_OQ);
                cmp2 = _mm256_cmp_pd(_mm256_add_pd(aa2, bb2), threshold, _CMP_LE_OQ);
                adder = _mm256_and_pd(adder, cmp);
                adder2 = _mm256_and_pd(adder2, cmp2);
                counter = _mm256_add_pd(counter, adder);
                counter2 = _mm256_add_pd(counter2, adder2);
                if ((k & 0x7) == 0 && _mm256_testz_si256(_mm256_castpd_si256(cmp), _mm256_castpd_si256(cmp)) != 0 &&
                        _mm256_testz_si256(_mm256_castpd_si256(cmp2), _mm256_castpd_si256(cmp2)) != 0) {
                    break;
                }
            }

            auto alignVec = [](double* data) -> double* {
                void* aligned = data;
                ::size_t length = 64;
                std::align(32, 4 * sizeof(double), aligned, length);
                return static_cast<double*>(aligned);
            };

            double resData[8];
            double* ftRes = alignVec(resData);
            double* resa = (double*) &resultsa;
            double* resb = (double*) &resultsb;
            _mm256_store_pd(ftRes, counter);
            for (int k = 0; k < 4 && i + k < info.bWidth; k++) {
                if (info.smooth)
                    data[i + k + j * info.bWidth] = ftRes[k] < 0 ? info.maxIter :
                        ftRes[k] >= info.maxIter ? info.maxIter :
                        ((float) ftRes[k]) + 1 - ::log(::log(resa[k] * resa[k] + resb[k] * resb[k]) / 2) / ::log(2.0f);
                else
                    data[i + k + j * info.bWidth] = ftRes[k] < 0 ? info.maxIter : float(ftRes[k]);
            }

            // write back the second vector of 4 pixels
            resa = (double*) &resultsa2;
            resb = (double*) &resultsb2;
            _mm256_store_pd(ftRes, counter2);
            i += 4;
            for (int k = 0; k < 4 && i + k < info.bWidth; k++) {
                if (info.smooth)
                    data[i + k + j * info.bWidth] = ftRes[k] < 0 ? info.maxIter :
                        ftRes[k] >= info.maxIter ? info.maxIter :
                        ((float) ftRes[k]) + 1 - ::log(::log(resa[k] * resa[k] + resb[k] * resb[k]) / 2) / ::log(2.0f);
                else
                    data[i + k + j * info.bWidth] = ftRes[k] < 0 ? info.maxIter : float(ftRes[k]);
            }
            i -= 4;
        }
    }
}
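
// Illustrative sketch, not part of the original generators; the helper name is
// invented for illustration. It models, for a single lane, the masked
// counter/adder trick used by every generator in this file: the compare mask
// cmp is all ones while |z|^2 <= 16, so _mm256_and_ps/pd(adder, cmp) leaves
// adder at 1.0 until the lane escapes and then clears it permanently, after
// which "counter += adder" stops growing for that lane while the SIMD loop
// keeps running for the remaining lanes.
inline void maskedCounterStepModel(float& counter, float& adder, bool stillBelowThreshold)
{
    if (!stillBelowThreshold)
        adder = 0.0f;        // branchless in the SIMD code: adder = and(adder, cmp)
    counter += adder;        // counter = add(counter, adder)
}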
struct VecPair
{
    __m256d a;
    __m256d b;
};

struct VecTriple
{
    __m256d a;
    __m256d b;
    __m256d c;
};

struct VecQuadruple
{
    __m256d a;
    __m256d b;
    __m256d c;
    __m256d d;
};

// error-free transforms: each returns the rounded result s together with the
// rounding error e, so that the exact result equals s + e
// (quickTwoSum/quickTwoDiff assume |a| >= |b|)

static inline VecPair quickTwoSum(__m256d a, __m256d b)
{
    __m256d s = _mm256_add_pd(a, b);
    __m256d e = _mm256_sub_pd(b, _mm256_sub_pd(s, a));
    return { s, e };
}

static inline VecPair quickTwoDiff(__m256d a, __m256d b)
{
    __m256d s = _mm256_sub_pd(a, b);
    __m256d e = _mm256_sub_pd(_mm256_sub_pd(a, s), b);
    return { s, e };
}

static inline VecPair twoSum(__m256d a, __m256d b)
{
    __m256d s = _mm256_add_pd(a, b);
    __m256d bb = _mm256_sub_pd(s, a);
    __m256d e = _mm256_add_pd(_mm256_sub_pd(a, _mm256_sub_pd(s, bb)), _mm256_sub_pd(b, bb));
    return { s, e };
}

static inline VecPair twoDiff(__m256d a, __m256d b)
{
    __m256d s = _mm256_sub_pd(a, b);
    __m256d bb = _mm256_sub_pd(s, a);
    __m256d e = _mm256_sub_pd(_mm256_sub_pd(a, _mm256_sub_pd(s, bb)), _mm256_add_pd(b, bb));
    return { s, e };
}

static inline VecTriple threeSum(__m256d a, __m256d b, __m256d c)
{
    auto [s, e] = twoSum(a, b);
    auto [r0, e2] = twoSum(s, c);
    auto [r1, r2] = twoSum(e, e2);
    return { r0, r1, r2 };
}

static inline VecPair threeTwoSum(__m256d a, __m256d b, __m256d c)
{
    auto [t, e1] = twoSum(a, b);
    auto [s, e2] = twoSum(t, c);
    return { s, _mm256_add_pd(e1, e2) };
}

static inline __m256d threeOneSum(__m256d a, __m256d b, __m256d c)
{
    return _mm256_add_pd(a, _mm256_add_pd(b, c));
}

static inline VecTriple sixThreeSum(__m256d a, __m256d b, __m256d c,
                                    __m256d d, __m256d e, __m256d f)
{
    auto [x0, x1, x2] = threeSum(a, b, c);
    auto [y0, y1, y2] = threeSum(d, e, f);
    auto [r0, t0] = twoSum(x0, y0);
    auto [t1, t2] = twoSum(x1, y1);
    auto [r1, t3] = twoSum(t0, t1);
    auto t4 = _mm256_add_pd(x2, y2);
    auto r2 = threeOneSum(t2, t3, t4);
    return { r0, r1, r2 };
}

static inline VecPair sixTwoSum(__m256d a, __m256d b, __m256d c,
                                __m256d d, __m256d e, __m256d f)
{
    auto [x0, x1, x2] = threeSum(a, b, c);
    auto [y0, y1, y2] = threeSum(d, e, f);
    auto [r0, t0] = twoSum(x0, y0);
    auto [t1, t2] = twoSum(x1, y1);
    auto [r1, t3] = twoSum(t0, t1);
    r1 = _mm256_add_pd(r1, _mm256_add_pd(x2, y2));
    r1 = _mm256_add_pd(r1, t3);
    return { r0, r1 };
}

static inline VecPair addDD(const VecPair& a, const VecPair& b)
{
    auto [s, e] = twoSum(a.a, b.a);
    e = _mm256_add_pd(e, _mm256_add_pd(a.b, b.b));
    auto [r1, r2] = quickTwoSum(s, e);
    return { r1, r2 };
}

static inline VecPair nineTwoSum(__m256d a, __m256d b, __m256d c,
                                 __m256d d, __m256d e, __m256d f,
                                 __m256d g, __m256d h, __m256d i)
{
    auto [x1, x2] = twoSum(a, d);
    auto [y1, y2] = twoSum(b, c);
    auto [z1, z2] = twoSum(e, h);
    auto [u1, u2] = twoSum(f, g);
    auto [t1, t2] = addDD({ x1, x2 }, { y1, y2 });
    auto [t3, t4] = addDD({ z1, z2 }, { u1, u2 });
    auto [t5, t6] = addDD({ t1, t2 }, { t3, t4 });
    return threeTwoSum(t5, t6, i);
}

static inline VecQuadruple renormalize(__m256d x0, __m256d x1, __m256d x2, __m256d x3, __m256d x4)
{
    auto [st1, t4] = quickTwoSum(x3, x4);
    auto [st2, t3] = quickTwoSum(x2, st1);
    auto [st3, t2] = quickTwoSum(x1, st2);
    auto [t0, t1] = quickTwoSum(x0, st3);

    __m256d s = t0;
    __m256d e;
    __m256d t[] = { t1, t2, t3, t4 };
    __m256d b[4] = { 0, 0, 0, 0 };

    int k = 0;
    for (int i = 0; i < 4; i++) {
        auto [st, et] = quickTwoSum(s, t[i]);
        s = st; e = et;
        b[k] = s;
        //if (e != 0) {
        b[k] = s;
        s = e;
        k = k + 1;
        //}
    }
    return { b[0], b[1], b[2], b[3] };
}

static inline VecQuadruple renorm1(__m256d x0, __m256d x1, __m256d x2, __m256d x3, __m256d x4)
{
    auto [r0, t0] = quickTwoSum(x0, x1);
    auto [r1, t1] = quickTwoSum(t0, x2);
    auto [r2, t2] = quickTwoSum(t1, x3);
    auto r3 = _mm256_add_pd(t2, x4);
    return { r0, r1, r2, r3 };
}

static inline VecQuadruple renorm2(__m256d x0, __m256d x1, __m256d x2, __m256d x3, __m256d x4)
{
    auto [st1, t4] = quickTwoSum(x3, x4);
    auto [st2, t3] = quickTwoSum(x2, st1);
    auto [st3, t2] = quickTwoSum(x1, st2);
    auto [t0, t1] = quickTwoSum(x0, st3);
    __m256d e = t0;
    auto [r0, e1] = quickTwoSum(e, t1);
    auto [r1, e2] = quickTwoSum(e1, t2);
    auto [r2, e3] = quickTwoSum(e2, t3);
    auto r3 = _mm256_add_pd(e3, t4);
    return { r0, r1, r2, r3 };
}

static inline VecPair twoProd(__m256d a, __m256d b)
{
    __m256d p = _mm256_mul_pd(a, b);
    __m256d e = _mm256_fmsub_pd(a, b, p);   // FMA recovers the exact rounding error of the product
    return { p, e };
}
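
// Illustrative sketch, not part of the original helpers: scalar counterparts of
// twoSum() and twoProd() above, assuming round-to-nearest double arithmetic and
// hardware FMA (std::fma comes from <cmath>, already included at the top of
// this file). The struct and function names are invented for illustration; in
// both cases the exact result equals s + e.
struct ScalarPair { double s, e; };

inline ScalarPair scalarTwoSum(double a, double b)
{
    double s = a + b;
    double bb = s - a;
    double e = (a - (s - bb)) + (b - bb);   // Knuth's TwoSum error term
    return { s, e };
}

inline ScalarPair scalarTwoProd(double a, double b)
{
    double p = a * b;
    double e = std::fma(a, b, -p);          // same idea as _mm256_fmsub_pd(a, b, p)
    return { p, e };
}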
// double-double value: x[0] holds the leading double, x[1] the much smaller
// trailing error term, giving roughly twice the precision of a plain double
struct AvxDoubleDouble
{
    __m256d x[2];

    inline AvxDoubleDouble(__m256d a, __m256d b) :
        x{ a, b }
    {}

    inline AvxDoubleDouble(double a, double b) :
        x{ _mm256_set1_pd(a), _mm256_set1_pd(b) }
    {}

    inline AvxDoubleDouble operator + (const AvxDoubleDouble& sm) const
    {
        auto [s, e] = twoSum(x[0], sm.x[0]);
        e = _mm256_add_pd(e, _mm256_add_pd(x[1], sm.x[1]));
        auto [r1, r2] = quickTwoSum(s, e);
        return AvxDoubleDouble{ r1, r2 };
    }

    inline AvxDoubleDouble operator - (const AvxDoubleDouble& sm) const
    {
        auto [s, e] = twoDiff(x[0], sm.x[0]);
        e = _mm256_add_pd(e, x[1]);
        e = _mm256_sub_pd(e, sm.x[1]);
        auto [r1, r2] = quickTwoSum(s, e);
        return AvxDoubleDouble{ r1, r2 };
    }

    inline AvxDoubleDouble operator * (const AvxDoubleDouble& sm) const
    {
        auto [p1, p2] = twoProd(this->x[0], sm.x[0]);
        p2 = _mm256_add_pd(p2,
            _mm256_add_pd(_mm256_mul_pd(sm.x[1], x[0]), _mm256_mul_pd(sm.x[0], x[1])));
        auto [r1, r2] = quickTwoSum(p1, p2);
        return AvxDoubleDouble{ r1, r2 };
    }

    inline AvxDoubleDouble sq(void) const
    {
        auto [p1, p2] = twoProd(x[0], x[0]);
        __m256d x01 = _mm256_mul_pd(x[1], x[0]);
        p2 = _mm256_add_pd(p2, _mm256_add_pd(x01, x01));
        auto [r1, r2] = quickTwoSum(p1, p2);
        return AvxDoubleDouble{ r1, r2 };
    }

    inline AvxDoubleDouble mul_pow2(double v) const
    {
        __m256d vv = _mm256_set1_pd(v);
        return { _mm256_mul_pd(vv, x[0]), _mm256_mul_pd(vv, x[1]) };
    }
};
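
// Illustrative sketch, not part of the original arithmetic: a scalar version of
// the double-double addition that AvxDoubleDouble::operator+ performs lane-wise
// above, with the twoSum/quickTwoSum steps written out inline. The struct and
// function names are invented for illustration.
struct ScalarDD { double hi, lo; };

inline ScalarDD scalarDDAdd(ScalarDD a, ScalarDD b)
{
    // twoSum of the leading parts
    double s = a.hi + b.hi;
    double bb = s - a.hi;
    double e = (a.hi - (s - bb)) + (b.hi - bb);
    // fold in both trailing parts, then renormalize with quickTwoSum
    e += a.lo + b.lo;
    double r1 = s + e;
    double r2 = e - (r1 - s);
    return { r1, r2 };
}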
template<bool parallel>
void CpuGenerator<mnd::DoubleDouble, mnd::X86_AVX_FMA, parallel>::generate(const mnd::MandelInfo& info, float* data)
{
    const MandelViewport& view = info.view;
    using T = LightDoubleDouble;

    T viewx = mnd::convert<T>(view.x);
    T viewy = mnd::convert<T>(view.y);
    T wpp = mnd::convert<T>(view.width / info.bWidth);
    T hpp = mnd::convert<T>(view.height / info.bHeight);

    T jX = mnd::convert<T>(info.juliaX);
    T jY = mnd::convert<T>(info.juliaY);
    AvxDoubleDouble juliaX = { jX[0], jX[1] };
    AvxDoubleDouble juliaY = { jY[0], jY[1] };

#if defined(_OPENMP)
    if constexpr(parallel)
        omp_set_num_threads(omp_get_num_procs());
#   pragma omp parallel for schedule(static, 1) if (parallel)
#endif
    for (long j = 0; j < info.bHeight; j++) {
        T y = viewy + T(double(j)) * hpp;
        __m256d y0s = { y.x[0], y.x[0], y.x[0], y.x[0] };
        __m256d y1s = { y.x[1], y.x[1], y.x[1], y.x[1] };
        AvxDoubleDouble ys{ y0s, y1s };
        for (long i = 0; i < info.bWidth; i += 4) {
            T x1 = viewx + T(double(i)) * wpp;
            T x2 = x1 + wpp;
            T x3 = x2 + wpp;
            T x4 = x3 + wpp;

            __m256d x0s = {
                x1[0], x2[0], x3[0], x4[0],
            };

            __m256d x1s = {
                x1[1], x2[1], x3[1], x4[1],
            };

            AvxDoubleDouble xs{ x0s, x1s };

            AvxDoubleDouble cx = info.julia ? juliaX : xs;
            AvxDoubleDouble cy = info.julia ? juliaY : ys;

            int itRes[4] = { 0, 0, 0, 0 };

            __m256d threshold = { 16.0, 16.0, 16.0, 16.0 };
            __m256d counter = { 0, 0, 0, 0 };
            __m256d adder = { 1, 1, 1, 1 };

            AvxDoubleDouble a = xs;
            AvxDoubleDouble b = ys;
            // zero-initialized so the first blend below does not read indeterminate values
            __m256d resultsa = { 0, 0, 0, 0 };
            __m256d resultsb = { 0, 0, 0, 0 };
            __m256d cmp = _mm256_cmp_pd(threshold, threshold, _CMP_LE_OQ);
            for (int k = 0; k < info.maxIter; k++) {
                AvxDoubleDouble aa = a.sq();
                AvxDoubleDouble bb = b.sq();
                AvxDoubleDouble abab = a * b.mul_pow2(2.0);
                a = aa - bb + cx;
                b = abab + cy;
                if (info.smooth) {
                    resultsa = _mm256_blendv_pd(resultsa, a.x[0], cmp);
                    resultsb = _mm256_blendv_pd(resultsb, b.x[0], cmp);
                }
                cmp = _mm256_cmp_pd(_mm256_add_pd(aa.x[0], bb.x[0]), threshold, _CMP_LE_OQ);
                adder = _mm256_and_pd(adder, cmp);
                counter = _mm256_add_pd(counter, adder);
                if ((k & 0x7) == 0 && _mm256_testz_si256(_mm256_castpd_si256(cmp), _mm256_castpd_si256(cmp)) != 0) {
                    break;
                }
            }

            auto alignVec = [](double* data) -> double* {
                void* aligned = data;
                ::size_t length = 64;
                std::align(32, 4 * sizeof(double), aligned, length);
                return static_cast<double*>(aligned);
            };

            double resData[8];
            double* ftRes = alignVec(resData);
            double* resa = (double*) &resultsa;
            double* resb = (double*) &resultsb;
            _mm256_store_pd(ftRes, counter);

            for (int k = 0; k < 4 && i + k < info.bWidth; k++) {
                if (info.smooth)
                    data[i + k + j * info.bWidth] = ftRes[k] < 0 ? info.maxIter :
                        ftRes[k] >= info.maxIter ? info.maxIter :
                        ((float) ftRes[k]) + 1 - ::log(::log(resa[k] * resa[k] + resb[k] * resb[k]) / 2) / ::log(2.0f);
                else
                    data[i + k + j * info.bWidth] = ftRes[k] >= 0 ? float(ftRes[k]) : info.maxIter;
            }
        }
    }
}
// quad-double value: four doubles of decreasing magnitude whose sum represents
// one high-precision number
struct AvxQuadDouble
{
    __m256d x[4];

    inline AvxQuadDouble(__m256d a, __m256d b, __m256d c, __m256d d) :
        x{ a, b, c, d }
    {}

    inline AvxQuadDouble(double a, double b, double c, double d) :
        x{ _mm256_set1_pd(a), _mm256_set1_pd(b), _mm256_set1_pd(c), _mm256_set1_pd(d) }
    {}

    inline AvxQuadDouble operator + (const AvxQuadDouble& sm) const
    {
        auto [s0, e0] = twoSum(x[0], sm.x[0]);
        auto [s1, e1] = twoSum(x[1], sm.x[1]);
        auto [s2, e2] = twoSum(x[2], sm.x[2]);
        auto [s3, e3] = twoSum(x[3], sm.x[3]);

        __m256d r0 = s0;

        auto [r1, t0] = twoSum(s1, e0);
        auto [r2, t1, t2] = threeSum(s2, e1, t0);
        auto [r3, t3, _t4] = threeSum(s3, e2, t1);
        auto [r4, _t5, _t6] = threeSum(e3, t3, t2);

        auto [re0, re1, re2, re3] = renorm1(r0, r1, r2, r3, r4);
        return { re0, re1, re2, re3 };
    }

    inline AvxQuadDouble operator - (const AvxQuadDouble& sm) const
    {
        auto [s0, e0] = twoDiff(x[0], sm.x[0]);
        auto [s1, e1] = twoDiff(x[1], sm.x[1]);
        auto [s2, e2] = twoDiff(x[2], sm.x[2]);
        auto [s3, e3] = twoDiff(x[3], sm.x[3]);

        __m256d r0 = s0;

        auto [r1, t0] = twoSum(s1, e0);
        auto [r2, t1, t2] = threeSum(s2, e1, t0);
        auto [r3, t3, _t4] = threeSum(s3, e2, t1);
        auto [r4, _t5, _t6] = threeSum(e3, t3, t2);

        auto [re0, re1, re2, re3] = renorm1(r0, r1, r2, r3, r4);
        return { re0, re1, re2, re3 };
    }

    inline AvxQuadDouble operator * (const AvxQuadDouble& sm) const
    {
        auto [a0, b0] = twoProd(x[0], sm.x[0]);
        auto [b1, c0] = twoProd(x[0], sm.x[1]);
        auto [b2, c1] = twoProd(x[1], sm.x[0]);
        auto [c2, d0] = twoProd(x[0], sm.x[2]);
        auto [c3, d1] = twoProd(x[1], sm.x[1]);
        auto [c4, d2] = twoProd(x[2], sm.x[0]);
        auto d5 = _mm256_mul_pd(x[3], sm.x[0]);
        auto d6 = _mm256_mul_pd(x[2], sm.x[1]);
        auto d7 = _mm256_mul_pd(x[1], sm.x[2]);
        auto d8 = _mm256_mul_pd(x[0], sm.x[3]);

        auto r0 = a0;
        auto [r1, c5, d3] = threeSum(b0, b1, b2);
        auto [r2, d4, e0] = sixThreeSum(c0, c1, c2, c3, c4, c5);
        auto [r3, e1] = nineTwoSum(d0, d1, d2, d3, d4, d5, d6, d7, d8);
        auto r4 = _mm256_add_pd(e0, e1);

        auto [n0, n1, n2, n3] = renorm2(r0, r1, r2, r3, r4);
        return { n0, n1, n2, n3 };
    }

    inline AvxQuadDouble mul_pow2(double v) const
    {
        __m256d vv = _mm256_set1_pd(v);
        return { _mm256_mul_pd(vv, x[0]), _mm256_mul_pd(vv, x[1]),
                 _mm256_mul_pd(vv, x[2]), _mm256_mul_pd(vv, x[3]) };
    }

    inline AvxQuadDouble sq(void) const
    {
        auto [a0, b0] = twoProd(x[0], x[0]);
        auto [b1, c0] = twoProd(x[0], x[1]);
        //auto [b2, c1] = twoProd(x[0], x[1]); //
        auto [c2, d0] = twoProd(x[0], x[2]);
        auto [c3, d1] = twoProd(x[1], x[1]);
        //auto [c4, d2] = twoProd(x[0], x[2]); //
        auto d5 = _mm256_mul_pd(x[3], x[0]);
        auto d6 = _mm256_mul_pd(x[2], x[1]);
        //auto d7 = _mm256_mul_pd(x[1], x[2]); //
        //auto d8 = _mm256_mul_pd(x[0], x[3]); //

        auto r0 = a0;
        auto [r1, c5] = twoSum(b0, _mm256_add_pd(b1, b1)); // d3
        auto [r2, d4, e0] = sixThreeSum(_mm256_add_pd(c0, c0), /*c0*/ _mm256_set1_pd(0.0), c2, c3, c2, c5);
        auto [r3, e1] = sixTwoSum(d0, d1, d0, d4, _mm256_add_pd(d5, d5), _mm256_add_pd(d6, d6));
        auto r4 = _mm256_add_pd(e0, e1);

        auto [n0, n1, n2, n3] = renorm2(r0, r1, r2, r3, r4);
        return { n0, n1, n2, n3 };
    }
};
template<bool parallel>
void CpuGenerator<mnd::QuadDouble, mnd::X86_AVX_FMA, parallel>::generate(const mnd::MandelInfo& info, float* data)
{
    const MandelViewport& view = info.view;
    using T = mnd::Real;

    T viewx = mnd::convert<T>(view.x);
    T viewy = mnd::convert<T>(view.y);
    T wpp = mnd::convert<T>(view.width / info.bWidth);
    T hpp = mnd::convert<T>(view.height / info.bHeight);

    T jX = mnd::convert<T>(info.juliaX);
    T jY = mnd::convert<T>(info.juliaY);

    // split a high-precision Real into four doubles of decreasing magnitude
    auto toQd = [] (const mnd::Real& x) -> std::tuple<double, double, double, double> {
        double a = double(x);
        mnd::Real rem = x - a;
        double b = double(rem);
        rem = rem - b;
        double c = double(rem);
        rem = rem - c;
        double d = double(rem);
        return { a, b, c, d };
    };

    auto toAvxQuadDouble = [&toQd] (const mnd::Real& x) -> AvxQuadDouble {
        auto [a, b, c, d] = toQd(x);
        return AvxQuadDouble{ a, b, c, d };
    };

    auto toAvxQuadDouble4 = [&toQd] (const mnd::Real& a, const mnd::Real& b,
                                     const mnd::Real& c, const mnd::Real& d) -> AvxQuadDouble {
        auto [x0, y0, z0, u0] = toQd(a);
        auto [x1, y1, z1, u1] = toQd(b);
        auto [x2, y2, z2, u2] = toQd(c);
        auto [x3, y3, z3, u3] = toQd(d);
        __m256d xs = { x0, x1, x2, x3 };
        __m256d ys = { y0, y1, y2, y3 };
        __m256d zs = { z0, z1, z2, z3 };
        __m256d us = { u0, u1, u2, u3 };
        return AvxQuadDouble{ xs, ys, zs, us };
    };

    AvxQuadDouble juliaX = toAvxQuadDouble(jX);
    AvxQuadDouble juliaY = toAvxQuadDouble(jY);

#if defined(_OPENMP)
    if constexpr(parallel)
        omp_set_num_threads(omp_get_num_procs());
#   pragma omp parallel for schedule(static, 1) if (parallel)
#endif
    for (long j = 0; j < info.bHeight; j++) {
        T y = viewy + T(double(j)) * hpp;
        AvxQuadDouble ys = toAvxQuadDouble(y);
        for (long i = 0; i < info.bWidth; i += 4) {
            T x1 = viewx + T(double(i)) * wpp;
            T x2 = x1 + wpp;
            T x3 = x2 + wpp;
            T x4 = x3 + wpp;

            AvxQuadDouble xs = toAvxQuadDouble4(x1, x2, x3, x4);

            AvxQuadDouble cx = info.julia ? juliaX : xs;
            AvxQuadDouble cy = info.julia ? juliaY : ys;

            int itRes[4] = { 0, 0, 0, 0 };

            __m256d threshold = { 16.0, 16.0, 16.0, 16.0 };
            __m256d counter = { 0, 0, 0, 0 };
            __m256d adder = { 1, 1, 1, 1 };

            AvxQuadDouble a = xs;
            AvxQuadDouble b = ys;
            // zero-initialized so the first blend below does not read indeterminate values
            __m256d resultsa = { 0, 0, 0, 0 };
            __m256d resultsb = { 0, 0, 0, 0 };
            __m256d cmp = _mm256_cmp_pd(threshold, threshold, _CMP_LE_OQ);
            for (int k = 0; k < info.maxIter; k++) {
                AvxQuadDouble aa = a.sq();
                AvxQuadDouble bb = b.sq();
                AvxQuadDouble abab = a * b.mul_pow2(2.0);
                a = aa - bb + cx;
                b = abab + cy;
                if (info.smooth) {
                    resultsa = _mm256_blendv_pd(resultsa, a.x[0], cmp);
                    resultsb = _mm256_blendv_pd(resultsb, b.x[0], cmp);
                }
                cmp = _mm256_cmp_pd(_mm256_add_pd(aa.x[0], bb.x[0]), threshold, _CMP_LE_OQ);
                adder = _mm256_and_pd(adder, cmp);
                counter = _mm256_add_pd(counter, adder);
                if (_mm256_testz_si256(_mm256_castpd_si256(cmp), _mm256_castpd_si256(cmp)) != 0) {
                    break;
                }
            }

            auto alignVec = [](double* data) -> double* {
                void* aligned = data;
                ::size_t length = 64;
                std::align(32, 4 * sizeof(double), aligned, length);
                return static_cast<double*>(aligned);
            };

            double resData[8];
            double* ftRes = alignVec(resData);
            double* resa = (double*) &resultsa;
            double* resb = (double*) &resultsb;
            _mm256_store_pd(ftRes, counter);

            for (int k = 0; k < 4 && i + k < info.bWidth; k++) {
                if (info.smooth)
                    data[i + k + j * info.bWidth] = ftRes[k] < 0 ? info.maxIter :
                        ftRes[k] >= info.maxIter ? info.maxIter :
                        ((float) ftRes[k]) + 1 - ::log(::log(resa[k] * resa[k] + resb[k] * resb[k]) / 2) / ::log(2.0f);
                else
                    data[i + k + j * info.bWidth] = ftRes[k] >= 0 ? float(ftRes[k]) : info.maxIter;
            }
        }
    }
}
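
// Illustrative sketch, not part of the original file: a usage example under the
// assumptions that the caller has constructed a generator (construction details
// live in CpuGenerators.h and are not shown here) and has filled in the
// mnd::MandelInfo members used throughout this file (view, bWidth, bHeight,
// maxIter, smooth, julia, juliaX, juliaY). The helper name is invented for
// illustration. The buffer layout matches the write-back loops above:
// data[i + j * info.bWidth] receives the (possibly fractional) iteration count.
#include <vector>

inline std::vector<float> generateFrame(CpuGenerator<float, mnd::X86_AVX_FMA, true>& generator,
                                        const mnd::MandelInfo& info)
{
    // one float per pixel
    std::vector<float> data(std::size_t(info.bWidth) * std::size_t(info.bHeight));
    generator.generate(info, data.data());
    return data;
}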