// CpuGeneratorsAVX.cpp

#include "CpuGenerators.h"

#include <immintrin.h>
#include <omp.h>

#include <cmath>
#include <utility>
#include <memory>

using mnd::CpuGenerator;

namespace mnd
{
    template class CpuGenerator<float, mnd::X86_AVX, false>;
    template class CpuGenerator<float, mnd::X86_AVX, true>;
    template class CpuGenerator<double, mnd::X86_AVX, false>;
    template class CpuGenerator<double, mnd::X86_AVX, true>;
    template class CpuGenerator<DoubleDouble, mnd::X86_AVX, false>;
    template class CpuGenerator<DoubleDouble, mnd::X86_AVX, true>;
}
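
// AVX implementations of the Mandelbrot generator. Each pass of the inner loop
// iterates 8 pixels (float) or 4 pixels (double / double-double) at once by
// keeping the real and imaginary parts of z in 256-bit registers; a per-lane
// mask freezes the iteration counter of every pixel that has already escaped.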
template<bool parallel>
void CpuGenerator<float, mnd::X86_AVX, parallel>::generate(const mnd::MandelInfo& info, float* data)
{
    using T = float;
    const MandelViewport& view = info.view;

    if constexpr (parallel)
        omp_set_num_threads(omp_get_num_procs());
#pragma omp parallel for schedule(static, 1) if (parallel)
    for (long j = 0; j < info.bHeight; j++) {
        T y = T(view.y) + T(j) * T(view.height / info.bHeight);
        __m256 ys = { y, y, y, y, y, y, y, y };
        long i = 0;
        for (; i < info.bWidth; i += 8) {
            // real parts of c for the 8 pixels handled in this pass
            __m256 xs = {
                float(view.x + float(i) * view.width / info.bWidth),
                float(view.x + float(i + 1) * view.width / info.bWidth),
                float(view.x + float(i + 2) * view.width / info.bWidth),
                float(view.x + float(i + 3) * view.width / info.bWidth),
                float(view.x + float(i + 4) * view.width / info.bWidth),
                float(view.x + float(i + 5) * view.width / info.bWidth),
                float(view.x + float(i + 6) * view.width / info.bWidth),
                float(view.x + float(i + 7) * view.width / info.bWidth)
            };

            __m256 counter = { 0, 0, 0, 0, 0, 0, 0, 0 };
            __m256 adder = { 1, 1, 1, 1, 1, 1, 1, 1 };
            __m256 resultsa = { 0, 0, 0, 0, 0, 0, 0, 0 };
            __m256 resultsb = { 0, 0, 0, 0, 0, 0, 0, 0 };
            __m256 threshold = { 16.0f, 16.0f, 16.0f, 16.0f, 16.0f, 16.0f, 16.0f, 16.0f };

            __m256 a = xs;
            __m256 b = ys;

            for (int k = 0; k < info.maxIter; k++) {
                // z = z^2 + c, with z = a + bi
                __m256 aa = _mm256_mul_ps(a, a);
                __m256 bb = _mm256_mul_ps(b, b);
                __m256 abab = _mm256_mul_ps(a, b); abab = _mm256_add_ps(abab, abab);
                a = _mm256_add_ps(_mm256_sub_ps(aa, bb), xs);
                b = _mm256_add_ps(abab, ys);
                // lanes with |z|^2 <= 16 are still iterating
                __m256 cmp = _mm256_cmp_ps(_mm256_add_ps(aa, bb), threshold, _CMP_LE_OQ);
                if (info.smooth) {
                    // remember the last z of each lane before it escaped
                    resultsa = _mm256_or_ps(_mm256_andnot_ps(cmp, resultsa), _mm256_and_ps(cmp, a));
                    resultsb = _mm256_or_ps(_mm256_andnot_ps(cmp, resultsb), _mm256_and_ps(cmp, b));
                }
                // once a lane escapes, its adder becomes 0 and its counter freezes
                adder = _mm256_and_ps(adder, cmp);
                counter = _mm256_add_ps(counter, adder);
                // every 8 iterations, check whether all lanes have escaped
                if ((k & 0x7) == 0 && _mm256_testz_ps(cmp, cmp) != 0) {
                    break;
                }
            }

            // returns a 32-byte-aligned pointer into the given buffer for _mm256_store_ps
            auto alignVec = [](float* data) -> float* {
                void* aligned = data;
                ::size_t length = 64;
                std::align(32, 8 * sizeof(float), aligned, length);
                return static_cast<float*>(aligned);
            };

            float resData[16];
            float* ftRes = alignVec(resData);
            float* resa = (float*) &resultsa;
            float* resb = (float*) &resultsb;
            _mm256_store_ps(ftRes, counter);
            for (int k = 0; k < 8 && i + k < info.bWidth; k++) {
                if (info.smooth) {
                    // smooth iteration count: n + 1 - log(log|z|) / log 2
                    data[i + k + j * info.bWidth] = ftRes[k] <= 0 ? info.maxIter :
                        ftRes[k] >= info.maxIter ? info.maxIter :
                        ((float) ftRes[k]) + 1 - ::log(::log(resa[k] * resa[k] + resb[k] * resb[k]) / 2) / ::log(2.0f);
                }
                else {
                    data[i + k + j * info.bWidth] = ftRes[k] <= 0 ? info.maxIter : ftRes[k];
                }
            }
        }
    }
}
template<bool parallel>
void CpuGenerator<double, mnd::X86_AVX, parallel>::generate(const mnd::MandelInfo& info, float* data)
{
    using T = double;
    const MandelViewport& view = info.view;

    if constexpr (parallel)
        omp_set_num_threads(omp_get_num_procs());
#pragma omp parallel for schedule(static, 1) if (parallel)
    for (long j = 0; j < info.bHeight; j++) {
        T y = T(view.y + T(j) * view.height / info.bHeight);
        __m256d ys = { y, y, y, y };
        long i = 0;
        for (; i < info.bWidth; i += 4) {
            // real parts of c for the 4 pixels handled in this pass
            __m256d xs = {
                double(view.x + double(i) * view.width / info.bWidth),
                double(view.x + double(i + 1) * view.width / info.bWidth),
                double(view.x + double(i + 2) * view.width / info.bWidth),
                double(view.x + double(i + 3) * view.width / info.bWidth)
            };

            int itRes[4] = { 0, 0, 0, 0 };

            __m256d threshold = { 16.0, 16.0, 16.0, 16.0 };
            __m256d counter = { 0, 0, 0, 0 };
            __m256d adder = { 1, 1, 1, 1 };
            __m256d resultsa = { 0, 0, 0, 0 };
            __m256d resultsb = { 0, 0, 0, 0 };

            __m256d a = xs;
            __m256d b = ys;

            for (int k = 0; k < info.maxIter; k++) {
                // z = z^2 + c, with z = a + bi
                __m256d aa = _mm256_mul_pd(a, a);
                __m256d bb = _mm256_mul_pd(b, b);
                __m256d abab = _mm256_mul_pd(a, b); abab = _mm256_add_pd(abab, abab);
                a = _mm256_add_pd(_mm256_sub_pd(aa, bb), xs);
                b = _mm256_add_pd(abab, ys);
                // lanes with |z|^2 <= 16 are still iterating
                __m256d cmp = _mm256_cmp_pd(_mm256_add_pd(aa, bb), threshold, _CMP_LE_OQ);
                if (info.smooth) {
                    resultsa = _mm256_or_pd(_mm256_andnot_pd(cmp, resultsa), _mm256_and_pd(cmp, a));
                    resultsb = _mm256_or_pd(_mm256_andnot_pd(cmp, resultsb), _mm256_and_pd(cmp, b));
                }
                adder = _mm256_and_pd(adder, cmp);
                counter = _mm256_add_pd(counter, adder);
                // every 4 iterations, check whether all lanes have escaped
                if ((k & 0x3) == 0 && _mm256_testz_si256(_mm256_castpd_si256(cmp), _mm256_castpd_si256(cmp)) != 0) {
                    break;
                }
            }

            // returns a 32-byte-aligned pointer into the given buffer for _mm256_store_pd
            auto alignVec = [](double* data) -> double* {
                void* aligned = data;
                ::size_t length = 64;
                std::align(32, 4 * sizeof(double), aligned, length);
                return static_cast<double*>(aligned);
            };

            double resData[8];
            double* ftRes = alignVec(resData);
            double* resa = (double*) &resultsa;
            double* resb = (double*) &resultsb;
            _mm256_store_pd(ftRes, counter);
            for (int k = 0; k < 4 && i + k < info.bWidth; k++) {
                if (info.smooth)
                    data[i + k + j * info.bWidth] = ftRes[k] <= 0 ? info.maxIter :
                        ftRes[k] >= info.maxIter ? info.maxIter :
                        ((float) ftRes[k]) + 1 - ::log(::log(resa[k] * resa[k] + resb[k] * resb[k]) / 2) / ::log(2.0f);
                else
                    data[i + k + j * info.bWidth] = ftRes[k] > 0 ? float(ftRes[k]) : info.maxIter;
            }
        }
    }
}
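
// Error-free transformations used to build double-double arithmetic: each
// routine returns, for all four lanes, the rounded result together with the
// exact rounding error (classic TwoSum / Veltkamp split / TwoProd algorithms).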
struct VecPair
{
    __m256d a;
    __m256d b;
};

// s = fl(a + b), e = exact error; assumes |a| >= |b|
static inline VecPair quickTwoSum(__m256d a, __m256d b)
{
    __m256d s = _mm256_add_pd(a, b);
    __m256d e = _mm256_sub_pd(b, _mm256_sub_pd(s, a));
    return { s, e };
}

// s = fl(a - b), e = exact error; assumes |a| >= |b|
static inline VecPair quickTwoDiff(__m256d a, __m256d b)
{
    __m256d s = _mm256_sub_pd(a, b);
    __m256d e = _mm256_sub_pd(_mm256_sub_pd(a, s), b);
    return { s, e };
}

// s = fl(a + b), e = exact error; no precondition on the magnitudes
static inline VecPair twoSum(__m256d a, __m256d b)
{
    __m256d s = _mm256_add_pd(a, b);
    __m256d bb = _mm256_sub_pd(s, a);
    __m256d e = _mm256_add_pd(_mm256_sub_pd(a, _mm256_sub_pd(s, bb)), _mm256_sub_pd(b, bb));
    return { s, e };
}

// s = fl(a - b), e = exact error; no precondition on the magnitudes
static inline VecPair twoDiff(__m256d a, __m256d b)
{
    __m256d s = _mm256_sub_pd(a, b);
    __m256d bb = _mm256_sub_pd(s, a);
    __m256d e = _mm256_sub_pd(_mm256_sub_pd(a, _mm256_sub_pd(s, bb)), _mm256_add_pd(b, bb));
    return { s, e };
}

// Veltkamp/Dekker splitting of each double into a high and a low part, a == hi + lo
static inline VecPair split(__m256d a)
{
    /*
    // -- this should never happen when doing mandelbrot calculations,
    // so we omit this check.
    if (a > _QD_SPLIT_THRESH || a < -_QD_SPLIT_THRESH) {
        a *= 3.7252902984619140625e-09; // 2^-28
        temp = _QD_SPLITTER * a;
        hi = temp - (temp - a);
        lo = a - hi;
        hi *= 268435456.0; // 2^28
        lo *= 268435456.0; // 2^28
    } else {
        temp = _QD_SPLITTER * a;
        hi = temp - (temp - a);
        lo = a - hi;
    }
    */
    static const __m256d SPLITTER = { 134217729.0, 134217729.0, 134217729.0, 134217729.0 }; // 2^27 + 1
    __m256d temp = _mm256_mul_pd(SPLITTER, a);
    __m256d hi = _mm256_sub_pd(temp, _mm256_sub_pd(temp, a));
    __m256d lo = _mm256_sub_pd(a, hi);
    return { hi, lo };
}

// p = fl(a * b), err = exact error of the product
static inline VecPair twoProd(__m256d a, __m256d b)
{
    __m256d p = _mm256_mul_pd(a, b);
    auto[a_hi, a_lo] = split(a);
    auto[b_hi, b_lo] = split(b);
    __m256d err = _mm256_add_pd(_mm256_add_pd(_mm256_sub_pd(_mm256_mul_pd(a_hi, b_hi), p), _mm256_add_pd(_mm256_mul_pd(a_hi, b_lo), _mm256_mul_pd(a_lo, b_hi))), _mm256_mul_pd(a_lo, b_lo));
    return { p, err };
}
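
// A vector of four double-double numbers: x[0] holds the high-order doubles,
// x[1] the low-order error terms. The operators implement double-double
// addition, subtraction and multiplication on all four lanes at once.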
struct AvxDoubleDouble
{
    __m256d x[2];

    inline AvxDoubleDouble(__m256d a, __m256d b) :
        x{ a, b }
    {}

    inline AvxDoubleDouble operator + (const AvxDoubleDouble& sm) const
    {
        auto[s, e] = twoSum(x[0], sm.x[0]);
        e = _mm256_add_pd(e, _mm256_add_pd(x[1], sm.x[1]));
        auto[r1, r2] = quickTwoSum(s, e);
        return AvxDoubleDouble{ r1, r2 };
    }

    inline AvxDoubleDouble operator - (const AvxDoubleDouble& sm) const
    {
        auto[s, e] = twoDiff(x[0], sm.x[0]);
        e = _mm256_add_pd(e, x[1]);
        e = _mm256_sub_pd(e, sm.x[1]);
        auto[r1, r2] = quickTwoSum(s, e);
        return AvxDoubleDouble{ r1, r2 };
    }

    inline AvxDoubleDouble operator * (const AvxDoubleDouble& sm) const
    {
        auto[p1, p2] = twoProd(this->x[0], sm.x[0]);
        p2 = _mm256_add_pd(p2,
            _mm256_add_pd(_mm256_mul_pd(sm.x[1], x[0]), _mm256_mul_pd(sm.x[0], x[1])) );
        auto[r1, r2] = quickTwoSum(p1, p2);
        return AvxDoubleDouble{ r1, r2 };
    }
};
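
// Mandelbrot generator in double-double precision (roughly twice the precision
// of a plain double), 4 pixels per pass. The structure mirrors the double
// version above; only the complex arithmetic goes through AvxDoubleDouble.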
template<bool parallel>
void CpuGenerator<mnd::DoubleDouble, mnd::X86_AVX, parallel>::generate(const mnd::MandelInfo& info, float* data)
{
    const MandelViewport& view = info.view;

    using T = DoubleDouble;

    T viewx = mnd::convert<T>(view.x);
    T viewy = mnd::convert<T>(view.y);
    T wpp = mnd::convert<T>(view.width / info.bWidth);
    T hpp = mnd::convert<T>(view.height / info.bHeight);

    if constexpr (parallel)
        omp_set_num_threads(omp_get_num_procs());
#pragma omp parallel for schedule(static, 1) if (parallel)
    for (long j = 0; j < info.bHeight; j++) {
        T y = viewy + T(double(j)) * hpp;
        __m256d y0s = { y.x[0], y.x[0], y.x[0], y.x[0] };
        __m256d y1s = { y.x[1], y.x[1], y.x[1], y.x[1] };
        AvxDoubleDouble ys{ y0s, y1s };
        long i = 0;
        for (; i < info.bWidth; i += 4) {
            T x1 = viewx + T(double(i)) * wpp;
            T x2 = x1 + wpp;
            T x3 = x2 + wpp;
            T x4 = x3 + wpp;

            __m256d x0s = {
                x1.x[0], x2.x[0], x3.x[0], x4.x[0],
            };
            __m256d x1s = {
                x1.x[1], x2.x[1], x3.x[1], x4.x[1],
            };
            AvxDoubleDouble xs{ x0s, x1s };

            int itRes[4] = { 0, 0, 0, 0 };

            __m256d threshold = { 16.0, 16.0, 16.0, 16.0 };
            __m256d counter = { 0, 0, 0, 0 };
            __m256d adder = { 1, 1, 1, 1 };

            AvxDoubleDouble a = xs;
            AvxDoubleDouble b = ys;

            // initialized to zero so the first masked blend reads defined values
            __m256d resultsa = { 0, 0, 0, 0 };
            __m256d resultsb = { 0, 0, 0, 0 };

            for (int k = 0; k < info.maxIter; k++) {
                // z = z^2 + c in double-double arithmetic
                AvxDoubleDouble aa = a * a;
                AvxDoubleDouble bb = b * b;
                AvxDoubleDouble abab = a * b; abab = abab + abab;
                a = aa - bb + xs;
                b = abab + ys;
                // the escape test only needs the high-order doubles
                __m256d cmp = _mm256_cmp_pd(_mm256_add_pd(aa.x[0], bb.x[0]), threshold, _CMP_LE_OQ);
                if (info.smooth) {
                    resultsa = _mm256_or_pd(_mm256_andnot_pd(cmp, resultsa), _mm256_and_pd(cmp, a.x[0]));
                    resultsb = _mm256_or_pd(_mm256_andnot_pd(cmp, resultsb), _mm256_and_pd(cmp, b.x[0]));
                }
                adder = _mm256_and_pd(adder, cmp);
                counter = _mm256_add_pd(counter, adder);
                if (_mm256_testz_si256(_mm256_castpd_si256(cmp), _mm256_castpd_si256(cmp)) != 0) {
                    break;
                }
            }

            // returns a 32-byte-aligned pointer into the given buffer for _mm256_store_pd
            auto alignVec = [](double* data) -> double* {
                void* aligned = data;
                ::size_t length = 64;
                std::align(32, 4 * sizeof(double), aligned, length);
                return static_cast<double*>(aligned);
            };

            double resData[8];
            double* ftRes = alignVec(resData);
            double* resa = (double*) &resultsa;
            double* resb = (double*) &resultsb;
            _mm256_store_pd(ftRes, counter);
            for (int k = 0; k < 4 && i + k < info.bWidth; k++) {
                if (info.smooth)
                    data[i + k + j * info.bWidth] = ftRes[k] <= 0 ? info.maxIter :
                        ftRes[k] >= info.maxIter ? info.maxIter :
                        ((float) ftRes[k]) + 1 - ::log(::log(resa[k] * resa[k] + resb[k] * resb[k]) / 2) / ::log(2.0f);
                else
                    data[i + k + j * info.bWidth] = ftRes[k] > 0 ? float(ftRes[k]) : info.maxIter;
            }
        }
    }
}