// CpuGeneratorsAVX.cpp

#include "FloatLog.h"

#include <immintrin.h>
#include <omp.h>

///
/// \brief Private namespace to make a little more sure that we do not
/// accidentally compile a function that is also used somewhere else and
/// end up emitting AVX instructions into it.
///
namespace avx_private
{
#include "LightDoubleDouble.h"
#include "TripleDouble.h"
}
void generateFloatAvx(long width, long height, float* data, bool parallel,
    float vx, float vy, float vw, float vh, int maxIter, bool smooth,
    bool julia, float jX, float jY)
{
    using T = float;
    const float dppf = float(vw / width);
    __m256 viewx = _mm256_set1_ps(vx);
    __m256 dpp = _mm256_set1_ps(dppf);
    __m256 juliaX = { jX, jX, jX, jX, jX, jX, jX, jX };
    __m256 juliaY = { jY, jY, jY, jY, jY, jY, jY, jY };

#if defined(_OPENMP)
    if (parallel)
        omp_set_num_threads(omp_get_num_procs());
#   pragma omp parallel for schedule(static, 1) if (parallel)
#endif
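    // Iterate z -> z^2 + c for 16 pixels at a time: two groups of 8 floats,
    // each held in one AVX register. Rows are handed out to the threads in an
    // interleaved fashion via schedule(static, 1).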
    for (long j = 0; j < height; j++) {
        T y = vy + T(j) * vh / height;
        __m256 ys = _mm256_set1_ps(y);
        for (long i = 0; i < width; i += 16) {
            __m256 pixc = { float(i), float(i + 1), float(i + 2), float(i + 3), float(i + 4), float(i + 5), float(i + 6), float(i + 7) };
            __m256 pixc2 = { float(i + 8), float(i + 9), float(i + 10), float(i + 11), float(i + 12), float(i + 13), float(i + 14), float(i + 15) };
            __m256 xs = _mm256_add_ps(_mm256_mul_ps(dpp, pixc), viewx);
            __m256 xs2 = _mm256_add_ps(_mm256_mul_ps(dpp, pixc2), viewx);
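
            // Per-lane iteration counting: "adder" holds 1.0 in every lane that
            // has not yet escaped and 0.0 afterwards, so adding it to "counter"
            // each iteration yields the escape time per pixel. A lane escapes
            // once |z|^2 = a^2 + b^2 exceeds the threshold of 16 (bailout radius 4).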
            __m256 counter = _mm256_setzero_ps();
            __m256 adder = _mm256_set1_ps(1);
            __m256 counter2 = _mm256_setzero_ps();
            __m256 adder2 = _mm256_set1_ps(1);
            __m256 threshold = _mm256_set1_ps(16);

            __m256 a = xs;
            __m256 a2 = xs2;
            __m256 b = ys;
            __m256 b2 = ys;
            __m256 cx = julia ? juliaX : xs;
            __m256 cx2 = julia ? juliaX : xs2;
            __m256 cy = julia ? juliaY : ys;

            __m256 resultsa = a;
            __m256 resultsb = b;
            __m256 resultsa2 = a2;
            __m256 resultsb2 = b2;
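
            // In smooth mode we additionally remember the value of z at the
            // iteration in which each lane escapes (resultsa/resultsb); it feeds
            // the smooth-coloring formula after the loop.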
            if (smooth) {
                __m256 cmp = _mm256_cmp_ps(a, a, _CMP_LE_OQ);
                __m256 cmp2 = _mm256_cmp_ps(a, a, _CMP_LE_OQ);
                for (int k = 0; k < maxIter; k++) {
                    __m256 aa = _mm256_mul_ps(a, a);
                    __m256 aa2 = _mm256_mul_ps(a2, a2);
                    __m256 bb = _mm256_mul_ps(b, b);
                    __m256 bb2 = _mm256_mul_ps(b2, b2);
                    __m256 abab = _mm256_mul_ps(a, b); abab = _mm256_add_ps(abab, abab);
                    __m256 abab2 = _mm256_mul_ps(a2, b2); abab2 = _mm256_add_ps(abab2, abab2);
                    a = _mm256_add_ps(_mm256_sub_ps(aa, bb), cx);
                    a2 = _mm256_add_ps(_mm256_sub_ps(aa2, bb2), cx2);
                    b = _mm256_add_ps(abab, cy);
                    b2 = _mm256_add_ps(abab2, cy);
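                    // Masked blend: lanes that are still iterating (mask all-ones)
                    // take the new z; lanes that have already escaped keep their
                    // previous value.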
                    resultsa = _mm256_or_ps(_mm256_andnot_ps(cmp, resultsa), _mm256_and_ps(cmp, a));
                    resultsb = _mm256_or_ps(_mm256_andnot_ps(cmp, resultsb), _mm256_and_ps(cmp, b));
                    resultsa2 = _mm256_or_ps(_mm256_andnot_ps(cmp2, resultsa2), _mm256_and_ps(cmp2, a2));
                    resultsb2 = _mm256_or_ps(_mm256_andnot_ps(cmp2, resultsb2), _mm256_and_ps(cmp2, b2));
                    cmp = _mm256_cmp_ps(_mm256_add_ps(aa, bb), threshold, _CMP_LE_OQ);
                    cmp2 = _mm256_cmp_ps(_mm256_add_ps(aa2, bb2), threshold, _CMP_LE_OQ);
                    adder = _mm256_and_ps(adder, cmp);
                    counter = _mm256_add_ps(counter, adder);
                    adder2 = _mm256_and_ps(adder2, cmp2);
                    counter2 = _mm256_add_ps(counter2, adder2);
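                    // Every 8 iterations, leave the loop early if every lane in
                    // both groups has escaped.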
                    if ((k & 0x7) == 0 && _mm256_testz_ps(cmp, cmp) != 0 && _mm256_testz_ps(cmp2, cmp2) != 0) {
                        break;
                    }
                }
            }
            else {
                for (int k = 0; k < maxIter; k++) {
                    __m256 aa = _mm256_mul_ps(a, a);
                    __m256 aa2 = _mm256_mul_ps(a2, a2);
                    __m256 bb = _mm256_mul_ps(b, b);
                    __m256 bb2 = _mm256_mul_ps(b2, b2);
                    __m256 abab = _mm256_mul_ps(a, b); abab = _mm256_add_ps(abab, abab);
                    __m256 abab2 = _mm256_mul_ps(a2, b2); abab2 = _mm256_add_ps(abab2, abab2);
                    a = _mm256_add_ps(_mm256_sub_ps(aa, bb), cx);
                    a2 = _mm256_add_ps(_mm256_sub_ps(aa2, bb2), cx2);
                    b = _mm256_add_ps(abab, cy);
                    b2 = _mm256_add_ps(abab2, cy);
                    __m256 cmp = _mm256_cmp_ps(_mm256_add_ps(aa, bb), threshold, _CMP_LE_OQ);
                    __m256 cmp2 = _mm256_cmp_ps(_mm256_add_ps(aa2, bb2), threshold, _CMP_LE_OQ);
                    adder = _mm256_and_ps(adder, cmp);
                    counter = _mm256_add_ps(counter, adder);
                    adder2 = _mm256_and_ps(adder2, cmp2);
                    counter2 = _mm256_add_ps(counter2, adder2);
                    if ((k & 0x7) == 0 && _mm256_testz_ps(cmp, cmp) != 0 && _mm256_testz_ps(cmp2, cmp2) != 0) {
                        break;
                    }
                }
            }
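
            // Write the per-pixel results back. With smoothing enabled the integer
            // escape count n is refined to n + 1 - log2(log|z|), which removes the
            // visible banding between iteration levels.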
            float resData[64];
            float* ftRes = resData;
            float* resa = ftRes + 16;
            float* resb = resa + 16;
            _mm256_storeu_ps(ftRes, counter);
            _mm256_storeu_ps(ftRes + 8, counter2);
            _mm256_storeu_ps(resa, resultsa);
            _mm256_storeu_ps(resa + 8, resultsa2);
            _mm256_storeu_ps(resb, resultsb);
            _mm256_storeu_ps(resb + 8, resultsb2);

            for (int k = 0; k < 16 && i + k < width; k++) {
                if (smooth) {
                    data[i + k + j * width] = ftRes[k] < 0 ? maxIter :
                        ftRes[k] >= maxIter ? maxIter :
                        ((float)ftRes[k]) + 1 - floatLog2(floatLog(resa[k] * resa[k] + resb[k] * resb[k]) * 0.5f);
                }
                else {
                    data[i + k + j * width] = ftRes[k] < 0 ? maxIter : ftRes[k];
                }
            }
        }
    }
}

void generateDoubleAvx(long width, long height, float* data, bool parallel,
    double vx, double vy, double vw, double vh, int maxIter, bool smooth,
    bool julia, double jX, double jY)
{
    using T = double;
    const double dppf = double(vw / width);
    __m256d viewx = { vx, vx, vx, vx };
    __m256d dpp = { dppf, dppf, dppf, dppf };
    __m256d juliaX = { jX, jX, jX, jX };
    __m256d juliaY = { jY, jY, jY, jY };

#if defined(_OPENMP)
    if (parallel)
        omp_set_num_threads(omp_get_num_procs());
#   pragma omp parallel for schedule(static, 1) if (parallel)
#endif
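    // Same scheme as the float kernel, but in double precision: 8 pixels per
    // inner-loop iteration, split into two groups of 4 doubles per AVX register.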
    for (long j = 0; j < height; j++) {
        T y = vy + T(j) * vh / height;
        __m256d ys = { y, y, y, y };
        for (long i = 0; i < width; i += 8) {
            __m256d pixc = { double(i), double(i + 1), double(i + 2), double(i + 3) };
            __m256d pixc2 = { double(i + 4), double(i + 5), double(i + 6), double(i + 7) };
            __m256d xs = _mm256_add_pd(_mm256_mul_pd(dpp, pixc), viewx);
            __m256d xs2 = _mm256_add_pd(_mm256_mul_pd(dpp, pixc2), viewx);

            int itRes[4] = { 0, 0, 0, 0 };
            __m256d threshold = { 16.0, 16.0, 16.0, 16.0 };
            __m256d counter = { 0, 0, 0, 0 };
            __m256d adder = { 1, 1, 1, 1 };
            __m256d counter2 = { 0, 0, 0, 0 };
            __m256d adder2 = { 1, 1, 1, 1 };

            __m256d resultsa = { 0, 0, 0, 0 };
            __m256d resultsb = { 0, 0, 0, 0 };
            __m256d resultsa2 = { 0, 0, 0, 0 };
            __m256d resultsb2 = { 0, 0, 0, 0 };
            __m256d a = xs;
            __m256d b = ys;
            __m256d a2 = xs2;
            __m256d b2 = ys;
            __m256d cx = julia ? juliaX : xs;
            __m256d cx2 = julia ? juliaX : xs2;
            __m256d cy = julia ? juliaY : ys;
            if (smooth) {
                __m256d cmp = _mm256_cmp_pd(a, a, _CMP_LE_OQ);
                __m256d cmp2 = _mm256_cmp_pd(a, a, _CMP_LE_OQ);
                for (int k = 0; k < maxIter; k++) {
                    __m256d aa = _mm256_mul_pd(a, a);
                    __m256d aa2 = _mm256_mul_pd(a2, a2);
                    __m256d bb = _mm256_mul_pd(b, b);
                    __m256d bb2 = _mm256_mul_pd(b2, b2);
                    __m256d abab = _mm256_mul_pd(a, b); abab = _mm256_add_pd(abab, abab);
                    __m256d abab2 = _mm256_mul_pd(a2, b2); abab2 = _mm256_add_pd(abab2, abab2);
                    a = _mm256_add_pd(_mm256_sub_pd(aa, bb), cx);
                    a2 = _mm256_add_pd(_mm256_sub_pd(aa2, bb2), cx2);
                    b = _mm256_add_pd(abab, cy);
                    b2 = _mm256_add_pd(abab2, cy);
                    resultsa = _mm256_or_pd(_mm256_andnot_pd(cmp, resultsa), _mm256_and_pd(cmp, a));
                    resultsb = _mm256_or_pd(_mm256_andnot_pd(cmp, resultsb), _mm256_and_pd(cmp, b));
                    resultsa2 = _mm256_or_pd(_mm256_andnot_pd(cmp2, resultsa2), _mm256_and_pd(cmp2, a2));
                    resultsb2 = _mm256_or_pd(_mm256_andnot_pd(cmp2, resultsb2), _mm256_and_pd(cmp2, b2));
                    cmp = _mm256_cmp_pd(_mm256_add_pd(aa, bb), threshold, _CMP_LE_OQ);
                    cmp2 = _mm256_cmp_pd(_mm256_add_pd(aa2, bb2), threshold, _CMP_LE_OQ);
                    adder = _mm256_and_pd(adder, cmp);
                    counter = _mm256_add_pd(counter, adder);
                    adder2 = _mm256_and_pd(adder2, cmp2);
                    counter2 = _mm256_add_pd(counter2, adder2);
                    if ((k & 0x7) == 0 && _mm256_testz_si256(_mm256_castpd_si256(cmp), _mm256_castpd_si256(cmp)) != 0 &&
                            _mm256_testz_si256(_mm256_castpd_si256(cmp2), _mm256_castpd_si256(cmp2)) != 0) {
                        break;
                    }
                }
            }
            else {
                for (int k = 0; k < maxIter; k++) {
                    __m256d aa = _mm256_mul_pd(a, a);
                    __m256d aa2 = _mm256_mul_pd(a2, a2);
                    __m256d bb = _mm256_mul_pd(b, b);
                    __m256d bb2 = _mm256_mul_pd(b2, b2);
                    __m256d abab = _mm256_mul_pd(a, b); abab = _mm256_add_pd(abab, abab);
                    __m256d abab2 = _mm256_mul_pd(a2, b2); abab2 = _mm256_add_pd(abab2, abab2);
                    a = _mm256_add_pd(_mm256_sub_pd(aa, bb), cx);
                    a2 = _mm256_add_pd(_mm256_sub_pd(aa2, bb2), cx2);
                    b = _mm256_add_pd(abab, cy);
                    b2 = _mm256_add_pd(abab2, cy);
                    __m256d cmp = _mm256_cmp_pd(_mm256_add_pd(aa, bb), threshold, _CMP_LE_OQ);
                    __m256d cmp2 = _mm256_cmp_pd(_mm256_add_pd(aa2, bb2), threshold, _CMP_LE_OQ);
                    adder = _mm256_and_pd(adder, cmp);
                    counter = _mm256_add_pd(counter, adder);
                    adder2 = _mm256_and_pd(adder2, cmp2);
                    counter2 = _mm256_add_pd(counter2, adder2);
                    if ((k & 0x7) == 0 && _mm256_testz_si256(_mm256_castpd_si256(cmp), _mm256_castpd_si256(cmp)) != 0 &&
                            _mm256_testz_si256(_mm256_castpd_si256(cmp2), _mm256_castpd_si256(cmp2)) != 0) {
                        break;
                    }
                }
            }
            double resData[8];
            double* ftRes = resData;
            double* resa = (double*) &resultsa;
            double* resb = (double*) &resultsb;
            _mm256_storeu_pd(ftRes, counter);
            for (int k = 0; k < 4 && i + k < width; k++) {
                if (smooth)
                    data[i + k + j * width] = ftRes[k] < 0 ? float(maxIter) :
                        ftRes[k] >= maxIter ? float(maxIter) :
                        float(((float)ftRes[k]) + 1 - floatLog2(floatLog(float(resa[k] * resa[k] + resb[k] * resb[k])) / 2));
                else
                    data[i + k + j * width] = ftRes[k] >= 0 ? float(ftRes[k]) : maxIter;
            }
            resa = (double*) &resultsa2;
            resb = (double*) &resultsb2;
            _mm256_storeu_pd(ftRes, counter2);
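            // Temporarily advance i to write the second group of four pixels,
            // then restore it; the loop increment of 8 covers both groups.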
            i += 4;
            for (int k = 0; k < 4 && i + k < width; k++) {
                if (smooth)
                    data[i + k + j * width] = ftRes[k] < 0 ? float(maxIter) :
                        ftRes[k] >= maxIter ? float(maxIter) :
                        float(((float)ftRes[k]) + 1 - floatLog2(floatLog(float(resa[k] * resa[k] + resb[k] * resb[k])) / 2));
                else
                    data[i + k + j * width] = ftRes[k] >= 0 ? float(ftRes[k]) : maxIter;
            }
            i -= 4;
        }
    }
}

namespace avx_private
{
    struct VecPair
    {
        __m256d a;
        __m256d b;
    };
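
    // Error-free transformations (Dekker/Knuth): each returns the rounded
    // result together with the exact rounding error, the basic building
    // blocks of double-double and triple-double arithmetic.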
    static inline VecPair quickTwoSum(__m256d a, __m256d b)
    {
        __m256d s = _mm256_add_pd(a, b);
        __m256d e = _mm256_sub_pd(b, _mm256_sub_pd(s, a));
        return { s, e };
    }

    static inline VecPair quickTwoDiff(__m256d a, __m256d b)
    {
        __m256d s = _mm256_sub_pd(a, b);
        __m256d e = _mm256_sub_pd(_mm256_sub_pd(a, s), b);
        return { s, e };
    }

    static inline VecPair twoSum(__m256d a, __m256d b)
    {
        __m256d s = _mm256_add_pd(a, b);
        __m256d bb = _mm256_sub_pd(s, a);
        __m256d e = _mm256_add_pd(_mm256_sub_pd(a, _mm256_sub_pd(s, bb)), _mm256_sub_pd(b, bb));
        return { s, e };
    }

    static inline VecPair twoDiff(__m256d a, __m256d b)
    {
        __m256d s = _mm256_sub_pd(a, b);
        __m256d bb = _mm256_sub_pd(s, a);
        __m256d e = _mm256_sub_pd(_mm256_sub_pd(a, _mm256_sub_pd(s, bb)), _mm256_add_pd(b, bb));
        return { s, e };
    }

    static inline VecPair threeTwoSum(__m256d a, __m256d b, __m256d c)
    {
        auto[t1, t2] = twoSum(a, b);
        auto[r0, t3] = twoSum(t1, c);
        return { r0, _mm256_add_pd(t2, t3) };
    }

    static inline VecPair split(__m256d a)
    {
        /*
        // -- this should never happen when doing mandelbrot calculations,
        // so we omit this check.
        if (a > _QD_SPLIT_THRESH || a < -_QD_SPLIT_THRESH) {
            a *= 3.7252902984619140625e-09;  // 2^-28
            temp = _QD_SPLITTER * a;
            hi = temp - (temp - a);
            lo = a - hi;
            hi *= 268435456.0;  // 2^28
            lo *= 268435456.0;  // 2^28
        } else {
            temp = _QD_SPLITTER * a;
            hi = temp - (temp - a);
            lo = a - hi;
        }
        */
        static const __m256d SPLITTER = { 134217729.0, 134217729.0, 134217729.0, 134217729.0 };
        __m256d temp = _mm256_mul_pd(SPLITTER, a);
        __m256d hi = _mm256_sub_pd(temp, _mm256_sub_pd(temp, a));
        __m256d lo = _mm256_sub_pd(a, hi);
        return { hi, lo };
    }
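
    // Dekker's product: with each operand split into high/low halves, the
    // rounding error of the product can be computed exactly without an FMA.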
    static inline VecPair twoProd(__m256d a, __m256d b)
    {
        __m256d p = _mm256_mul_pd(a, b);
        auto[a_hi, a_lo] = split(a);
        auto[b_hi, b_lo] = split(b);
        __m256d err = _mm256_add_pd(_mm256_add_pd(_mm256_sub_pd(_mm256_mul_pd(a_hi, b_hi), p), _mm256_add_pd(_mm256_mul_pd(a_hi, b_lo), _mm256_mul_pd(a_lo, b_hi))), _mm256_mul_pd(a_lo, b_lo));
        return { p, err };
    }

    static inline VecPair twoSq(__m256d a)
    {
        __m256d p = _mm256_mul_pd(a, a);
        auto[a_hi, a_lo] = split(a);
        __m256d err =
            _mm256_add_pd(
                _mm256_add_pd(
                    _mm256_sub_pd(_mm256_mul_pd(a_hi, a_hi), p),
                    _mm256_add_pd(
                        _mm256_mul_pd(a_hi, a_lo),
                        _mm256_mul_pd(a_hi, a_lo)
                    )
                ),
                _mm256_mul_pd(a_lo, a_lo)
            );
        return { p, err };
    }
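
    // A vector of four double-double numbers: each lane is the unevaluated
    // sum x[0] + x[1], giving roughly twice the precision of a plain double.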
    struct AvxDoubleDouble
    {
        __m256d x[2];

        inline AvxDoubleDouble(__m256d a, __m256d b) :
            x{ a, b }
        {}

        inline AvxDoubleDouble(double a, double b) :
            x{ _mm256_set1_pd(a), _mm256_set1_pd(b) }
        {}

        inline AvxDoubleDouble operator + (const AvxDoubleDouble& sm) const
        {
            auto[s, e] = twoSum(x[0], sm.x[0]);
            e = _mm256_add_pd(e, _mm256_add_pd(x[1], sm.x[1]));
            auto[r1, r2] = quickTwoSum(s, e);
            return AvxDoubleDouble{ r1, r2 };
        }

        inline AvxDoubleDouble operator - (const AvxDoubleDouble& sm) const
        {
            auto[s, e] = twoDiff(x[0], sm.x[0]);
            e = _mm256_add_pd(e, x[1]);
            e = _mm256_sub_pd(e, sm.x[1]);
            auto[r1, r2] = quickTwoSum(s, e);
            return AvxDoubleDouble{ r1, r2 };
        }

        inline AvxDoubleDouble operator * (const AvxDoubleDouble& sm) const
        {
            auto[p1, p2] = twoProd(this->x[0], sm.x[0]);
            p2 = _mm256_add_pd(p2,
                _mm256_add_pd(_mm256_mul_pd(sm.x[1], x[0]), _mm256_mul_pd(sm.x[0], x[1])));
            auto[r1, r2] = quickTwoSum(p1, p2);
            return AvxDoubleDouble{ r1, r2 };
        }

        inline AvxDoubleDouble sq(void) const
        {
            auto[p1, p2] = twoSq(x[0]);
            __m256d x01_2 = _mm256_mul_pd(_mm256_add_pd(x[0], x[0]), x[1]);
            p2 = _mm256_add_pd(p2, x01_2);
            auto[r1, r2] = quickTwoSum(p1, p2);
            return AvxDoubleDouble{ r1, r2 };
        }

        inline AvxDoubleDouble twice(void) const
        {
            return AvxDoubleDouble{ _mm256_add_pd(x[0], x[0]), _mm256_add_pd(x[1], x[1]) };
        }
    };
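
    // A vector of four triple-double numbers: each lane is the unevaluated
    // sum x[0] + x[1] + x[2], for roughly three times the precision of a double.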
    struct AvxTripleDouble
    {
        __m256d x[3];

        inline AvxTripleDouble(__m256d a, __m256d b, __m256d c) :
            x{ a, b, c }
        {}

        inline AvxTripleDouble(double a, double b, double c) :
            x{ _mm256_set1_pd(a), _mm256_set1_pd(b), _mm256_set1_pd(c) }
        {}

        inline AvxTripleDouble operator + (const AvxTripleDouble& b) const
        {
            const auto& a = *this;
            auto[r0, t0] = twoSum(a.x[0], b.x[0]);
            auto[t1, t2] = twoSum(a.x[1], b.x[1]);
            auto[r1, t3] = twoSum(t0, t1);
            auto r2 = _mm256_add_pd(_mm256_add_pd(t2, _mm256_add_pd(a.x[2], b.x[2])), t3);

            auto[re1, t4] = quickTwoSum(r0, r1);
            auto[re2, re3] = quickTwoSum(t4, r2);
            return { re1, re2, re3 };
        }

        inline AvxTripleDouble operator - (const AvxTripleDouble& b) const
        {
            const auto& a = *this;
            auto[r0, t0] = twoDiff(a.x[0], b.x[0]);
            auto[t1, t2] = twoDiff(a.x[1], b.x[1]);
            auto[r1, t3] = twoSum(t0, t1);
            auto r2 = _mm256_add_pd(_mm256_add_pd(t2, _mm256_sub_pd(a.x[2], b.x[2])), t3);

            auto[re1, t4] = quickTwoSum(r0, r1);
            auto[re2, re3] = quickTwoSum(t4, r2);
            return { re1, re2, re3 };
        }

        inline AvxTripleDouble operator * (const AvxTripleDouble& b) const
        {
            const auto& a = *this;
            auto[p1_0, p2_0] = twoProd(a.x[0], b.x[0]);
            auto[p2_1, p3_0] = twoProd(a.x[0], b.x[1]);
            auto[p2_2, p3_1] = twoProd(a.x[1], b.x[0]);

            auto[t2, tl3] = threeTwoSum(p2_0, p2_1, p2_2);
            auto t3 = _mm256_add_pd(tl3,
                _mm256_add_pd(
                    _mm256_add_pd(p3_0, p3_1),
                    _mm256_add_pd(
                        _mm256_mul_pd(a.x[1], b.x[1]),
                        _mm256_add_pd(
                            _mm256_mul_pd(a.x[2], b.x[0]),
                            _mm256_mul_pd(a.x[0], b.x[2])
                        )
                    )
                )
            );
            auto[re0, q2] = quickTwoSum(p1_0, t2);
            auto[re1, re2] = quickTwoSum(q2, t3);
            return { re0, re1, re2 };
        }

        inline AvxTripleDouble sq(void) const
        {
            auto twox0 = _mm256_add_pd(x[0], x[0]);
            auto[p1_0, p2_0] = twoProd(x[0], x[0]);
            auto[p2_1, p3_0] = twoProd(twox0, x[1]);
            auto[t2, tl3] = twoSum(p2_0, p2_1);
            auto t3 =
                _mm256_add_pd(
                    tl3,
                    _mm256_add_pd(
                        p3_0,
                        _mm256_add_pd(
                            _mm256_mul_pd(x[1], x[1]),
                            _mm256_mul_pd(x[2], twox0)
                        )
                    )
                );
            auto[re0, q2] = quickTwoSum(p1_0, t2);
            auto[re1, re2] = quickTwoSum(q2, t3);
            return { re0, re1, re2 };
        }

        inline AvxTripleDouble twice(void) const
        {
            return AvxTripleDouble{ _mm256_add_pd(x[0], x[0]), _mm256_add_pd(x[1], x[1]), _mm256_add_pd(x[2], x[2]) };
        }
    };
} // namespace avx_private

void generateDoubleDoubleAvx(long width, long height, float* data, bool parallel,
    double vx1, double vx2, double vy1, double vy2, double vw1, double vw2, double vh1, double vh2, int maxIter, bool smooth,
    bool julia, double jX1, double jX2, double jY1, double jY2)
{
    using namespace avx_private;
    using T = mnd::LightDoubleDouble;

    T viewx{ vx1, vx2 };
    T viewy{ vy1, vy2 };
    T wpp = T{ vw1, vw2 } * T(1.0 / width);
    T hpp = T{ vh1, vh2 } * T(1.0 / height);

    T jX{ jX1, jX2 };
    T jY{ jY1, jY2 };
    AvxDoubleDouble juliaX = { jX[0], jX[1] };
    AvxDoubleDouble juliaY = { jY[0], jY[1] };

#if defined(_OPENMP)
    if (parallel)
        omp_set_num_threads(omp_get_num_procs());
#   pragma omp parallel for schedule(static, 1) if (parallel)
#endif
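    // Double-double kernel: 4 pixels per inner-loop iteration, with both the
    // coordinates and the iteration variables kept in double-double precision.
    // Only the high parts are needed for the escape test and smooth coloring.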
    for (long j = 0; j < height; j++) {
        T y = viewy + T(double(j)) * hpp;
        AvxDoubleDouble ys{ y[0], y[1] };
        for (long i = 0; i < width; i += 4) {
            T x1 = viewx + T(double(i)) * wpp;
            T x2 = x1 + wpp;
            T x3 = x2 + wpp;
            T x4 = x3 + wpp;

            __m256d x0s = {
                x1[0], x2[0], x3[0], x4[0],
            };
            __m256d x1s = {
                x1[1], x2[1], x3[1], x4[1],
            };

            AvxDoubleDouble xs{ x0s, x1s };
            AvxDoubleDouble cx = julia ? juliaX : xs;
            AvxDoubleDouble cy = julia ? juliaY : ys;

            int itRes[4] = { 0, 0, 0, 0 };
            __m256d threshold = { 16.0, 16.0, 16.0, 16.0 };
            __m256d counter = { 0, 0, 0, 0 };
            __m256d adder = { 1, 1, 1, 1 };

            AvxDoubleDouble a = xs;
            AvxDoubleDouble b = ys;
            __m256d resultsa = _mm256_set1_pd(0);
            __m256d resultsb = _mm256_set1_pd(0);
            __m256d cmp = _mm256_cmp_pd(threshold, threshold, _CMP_LE_OQ);
            for (int k = 0; k < maxIter; k++) {
                AvxDoubleDouble aa = a.sq();
                AvxDoubleDouble bb = b.sq();
                AvxDoubleDouble abab = a * b.twice();
                a = aa - bb + cx;
                b = abab + cy;
                if (smooth) {
                    resultsa = _mm256_or_pd(_mm256_andnot_pd(cmp, resultsa), _mm256_and_pd(cmp, a.x[0]));
                    resultsb = _mm256_or_pd(_mm256_andnot_pd(cmp, resultsb), _mm256_and_pd(cmp, b.x[0]));
                }
                cmp = _mm256_cmp_pd(_mm256_add_pd(aa.x[0], bb.x[0]), threshold, _CMP_LE_OQ);
                adder = _mm256_and_pd(adder, cmp);
                counter = _mm256_add_pd(counter, adder);
                if (_mm256_testz_si256(_mm256_castpd_si256(cmp), _mm256_castpd_si256(cmp)) != 0) {
                    break;
                }
            }

            double resData[8];
            double* ftRes = resData;
            double* resa = (double*) &resultsa;
            double* resb = (double*) &resultsb;
            _mm256_storeu_pd(ftRes, counter);

            for (int k = 0; k < 4 && i + k < width; k++) {
                if (smooth)
                    data[i + k + j * width] = float(ftRes[k] < 0 ? maxIter :
                        ftRes[k] >= maxIter ? maxIter :
                        ((float)ftRes[k]) + 1 - floatLog2(::floatLog(float(resa[k] * resa[k] + resb[k] * resb[k])) / 2));
                else
                    data[i + k + j * width] = ftRes[k] >= 0 ? float(ftRes[k]) : maxIter;
            }
        }
    }
}

void generateTripleDoubleAvx(long width, long height, float* data, bool parallel,
    double vx1, double vx2, double vx3, double vy1, double vy2, double vy3,
    double vw1, double vw2, double vw3, double vh1, double vh2, double vh3,
    int maxIter, bool smooth, bool julia, double jX1,
    double jX2, double jX3, double jY1, double jY2, double jY3)
{
    using namespace avx_private;
    using T = mnd::TripleDouble;

    T viewx{ vx1, vx2, vx3 };
    T viewy{ vy1, vy2, vy3 };
    T wpp = T{ vw1, vw2, vw3 } * T(1.0 / width);
    T hpp = T{ vh1, vh2, vh3 } * T(1.0 / height);

    T jX{ jX1, jX2, jX3 };
    T jY{ jY1, jY2, jY3 };
    AvxTripleDouble juliaX = { jX[0], jX[1], jX[2] };
    AvxTripleDouble juliaY = { jY[0], jY[1], jY[2] };

#if defined(_OPENMP)
    if (parallel)
        omp_set_num_threads(omp_get_num_procs());
#   pragma omp parallel for schedule(static, 1) if (parallel)
#endif
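    // Triple-double kernel: identical structure to the double-double kernel,
    // but with one extra limb of precision per coordinate.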
    for (long j = 0; j < height; j++) {
        T y = viewy + T(double(j)) * hpp;
        AvxTripleDouble ys{ y[0], y[1], y[2] };
        for (long i = 0; i < width; i += 4) {
            T x1 = viewx + T(double(i)) * wpp;
            T x2 = x1 + wpp;
            T x3 = x2 + wpp;
            T x4 = x3 + wpp;

            __m256d x0s = {
                x1[0], x2[0], x3[0], x4[0],
            };
            __m256d x1s = {
                x1[1], x2[1], x3[1], x4[1],
            };
            __m256d x2s = {
                x1[2], x2[2], x3[2], x4[2],
            };

            AvxTripleDouble xs{ x0s, x1s, x2s };
            AvxTripleDouble cx = julia ? juliaX : xs;
            AvxTripleDouble cy = julia ? juliaY : ys;

            int itRes[4] = { 0, 0, 0, 0 };
            __m256d threshold = { 16.0, 16.0, 16.0, 16.0 };
            __m256d counter = { 0, 0, 0, 0 };
            __m256d adder = { 1, 1, 1, 1 };

            AvxTripleDouble a = xs;
            AvxTripleDouble b = ys;
            __m256d resultsa = _mm256_set1_pd(0);
            __m256d resultsb = _mm256_set1_pd(0);
            __m256d cmp = _mm256_cmp_pd(threshold, threshold, _CMP_LE_OQ);
            for (int k = 0; k < maxIter; k++) {
                AvxTripleDouble aa = a.sq();
                AvxTripleDouble bb = b.sq();
                AvxTripleDouble abab = a * b.twice();
                a = aa - bb + cx;
                b = abab + cy;
                if (smooth) {
                    resultsa = _mm256_or_pd(_mm256_andnot_pd(cmp, resultsa), _mm256_and_pd(cmp, a.x[0]));
                    resultsb = _mm256_or_pd(_mm256_andnot_pd(cmp, resultsb), _mm256_and_pd(cmp, b.x[0]));
                }
                cmp = _mm256_cmp_pd(_mm256_add_pd(aa.x[0], bb.x[0]), threshold, _CMP_LE_OQ);
                adder = _mm256_and_pd(adder, cmp);
                counter = _mm256_add_pd(counter, adder);
                if (_mm256_testz_si256(_mm256_castpd_si256(cmp), _mm256_castpd_si256(cmp)) != 0) {
                    break;
                }
            }

            double resData[8];
            double* ftRes = resData;
            double* resa = (double*) &resultsa;
            double* resb = (double*) &resultsb;
            _mm256_storeu_pd(ftRes, counter);

            for (int k = 0; k < 4 && i + k < width; k++) {
                if (smooth)
                    data[i + k + j * width] = float(ftRes[k] < 0 ? maxIter :
                        ftRes[k] >= maxIter ? maxIter :
                        ((float)ftRes[k]) + 1 - floatLog2(floatLog(float(resa[k] * resa[k] + resb[k] * resb[k])) * 0.5f));
                else
                    data[i + k + j * width] = ftRes[k] >= 0 ? float(ftRes[k]) : maxIter;
            }
        }
    }
}