// CpuGeneratorsAVXFMA.cpp

#include "CpuGenerators.h"

#include <immintrin.h>
#include <omp.h>

#include <cmath>
#include <utility>
#include <memory>

using mnd::CpuGenerator;

namespace mnd
{
    template class CpuGenerator<DoubleDouble, mnd::X86_AVX_FMA, false>;
    template class CpuGenerator<DoubleDouble, mnd::X86_AVX_FMA, true>;
}
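
// The helpers below are standard error-free transformations: each returns the
// rounded result of an operation together with the exact rounding error,
// carried out on four double lanes at once.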
// A pair of AVX vectors, used to return (result, error) from the helpers.
struct VecPair
{
    __m256d a;
    __m256d b;
};

// Dekker's fast two-sum; exact only if |a| >= |b| lane-wise.
static inline VecPair quickTwoSum(__m256d a, __m256d b)
{
    __m256d s = _mm256_add_pd(a, b);
    __m256d e = _mm256_sub_pd(b, _mm256_sub_pd(s, a));
    return { s, e };
}

// Fast two-difference; exact only if |a| >= |b| lane-wise.
static inline VecPair quickTwoDiff(__m256d a, __m256d b)
{
    __m256d s = _mm256_sub_pd(a, b);
    __m256d e = _mm256_sub_pd(_mm256_sub_pd(a, s), b);
    return { s, e };
}

// Knuth's two-sum; no assumption on the magnitudes of a and b.
static inline VecPair twoSum(__m256d a, __m256d b)
{
    __m256d s = _mm256_add_pd(a, b);
    __m256d bb = _mm256_sub_pd(s, a);
    __m256d e = _mm256_add_pd(_mm256_sub_pd(a, _mm256_sub_pd(s, bb)), _mm256_sub_pd(b, bb));
    return { s, e };
}

// Two-difference; no assumption on the magnitudes of a and b.
static inline VecPair twoDiff(__m256d a, __m256d b)
{
    __m256d s = _mm256_sub_pd(a, b);
    __m256d bb = _mm256_sub_pd(s, a);
    __m256d e = _mm256_sub_pd(_mm256_sub_pd(a, _mm256_sub_pd(s, bb)), _mm256_add_pd(b, bb));
    return { s, e };
}
// Unfinished Dekker-style split helper for a non-FMA twoProd, kept commented out.
/*
static inline VecPair split(__m256d a)
{
    static const __m256d SPLIT_THRESH = { 6.69692879491417e+299, 6.69692879491417e+299, 6.69692879491417e+299, 6.69692879491417e+299 };
    static const __m256d MINUS_SPLIT_THRESH = { -6.69692879491417e+299, -6.69692879491417e+299, -6.69692879491417e+299, -6.69692879491417e+299 };
    static const __m256d SPLITTER = { 134217729.0, 134217729.0, 134217729.0, 134217729.0 };
    __m256d temp;
    __m256i cmp1 = _mm256_castpd_si256(_mm256_cmp_pd(a, SPLIT_THRESH, _CMP_GT_OQ));
    __m256i cmp2 = _mm256_castpd_si256(_mm256_cmp_pd(a, MINUS_SPLIT_THRESH, _CMP_LT_OQ));
    __m256i cmp = _mm256_or_si256
}*/
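
// twoProd: p = fl(a * b) and the exact rounding error e = a*b - p,
// obtained directly from the fused multiply-subtract unit.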
static inline VecPair twoProd(__m256d a, __m256d b)
{
//#ifdef CPUID_FMA
    __m256d p = _mm256_mul_pd(a, b);
    __m256d e = _mm256_fmsub_pd(a, b, p);
    return { p, e };
//#else
/*  double a_hi, a_lo, b_hi, b_lo;
    __m256d p = _mm256_mul_ps(a, b);
    split(a, a_hi, a_lo);
    split(b, b_hi, b_lo);
    err = ((a_hi * b_hi - p) + a_hi * b_lo + a_lo * b_hi) + a_lo * b_lo;
    return p;*/
//#endif
}
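
// Four double-double numbers in SoA layout: x[0] holds the high parts,
// x[1] the corresponding low (error) parts of each lane.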
struct AvxDoubleDouble
{
    __m256d x[2];

    inline AvxDoubleDouble(__m256d a, __m256d b) :
        x{ a, b }
    {}

    inline AvxDoubleDouble operator + (const AvxDoubleDouble& sm) const
    {
        auto[s, e] = twoSum(x[0], sm.x[0]);
        e = _mm256_add_pd(e, _mm256_add_pd(x[1], sm.x[1]));
        auto[r1, r2] = quickTwoSum(s, e);
        return AvxDoubleDouble{ r1, r2 };
    }

    inline AvxDoubleDouble operator - (const AvxDoubleDouble& sm) const
    {
        auto[s, e] = twoDiff(x[0], sm.x[0]);
        e = _mm256_add_pd(e, x[1]);
        e = _mm256_sub_pd(e, sm.x[1]);
        auto[r1, r2] = quickTwoSum(s, e);
        return AvxDoubleDouble{ r1, r2 };
    }

    inline AvxDoubleDouble operator * (const AvxDoubleDouble& sm) const
    {
        auto[p1, p2] = twoProd(this->x[0], sm.x[0]);
        p2 = _mm256_add_pd(p2,
            _mm256_add_pd(_mm256_mul_pd(sm.x[1], x[0]), _mm256_mul_pd(sm.x[0], x[1])));
        auto[r1, r2] = quickTwoSum(p1, p2);
        return AvxDoubleDouble{ r1, r2 };
    }
};
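
// Computes iteration counts for the view described by info, four pixels at a
// time, in double-double precision; results are written to data row by row.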
template<bool parallel>
void CpuGenerator<mnd::DoubleDouble, mnd::X86_AVX_FMA, parallel>::generate(const mnd::MandelInfo& info, float* data)
{
    const MandelViewport& view = info.view;

    using T = DoubleDouble;

    T viewx = mnd::convert<T>(view.x);
    T viewy = mnd::convert<T>(view.y);
    T wpp = mnd::convert<T>(view.width / info.bWidth);
    T hpp = mnd::convert<T>(view.height / info.bHeight);

//    if constexpr(parallel)
//        omp_set_num_threads(2 * omp_get_num_procs());
//#pragma omp parallel for schedule(static, 1) if (parallel)
    for (long j = 0; j < info.bHeight; j++) {
        T y = viewy + T(double(j)) * hpp;
        __m256d y0s = { y.x[0], y.x[0], y.x[0], y.x[0] };
        __m256d y1s = { y.x[1], y.x[1], y.x[1], y.x[1] };
        AvxDoubleDouble ys{ y0s, y1s };
        long i = 0;
        for (; i < info.bWidth; i += 4) {
            T x1 = viewx + T(double(i)) * wpp;
            T x2 = x1 + wpp;
            T x3 = x2 + wpp;
            T x4 = x3 + wpp;
            __m256d x0s = {
                x1.x[0], x2.x[0], x3.x[0], x4.x[0],
            };
            __m256d x1s = {
                x1.x[1], x2.x[1], x3.x[1], x4.x[1],
            };
            AvxDoubleDouble xs{ x0s, x1s };

            int itRes[4] = { 0, 0, 0, 0 };
            __m256d threshold = { 16.0, 16.0, 16.0, 16.0 };
            __m256d counter = { 0, 0, 0, 0 };
            __m256d adder = { 1, 1, 1, 1 };

            AvxDoubleDouble a = xs;
            AvxDoubleDouble b = ys;
            for (int k = 0; k < info.maxIter; k++) {
                AvxDoubleDouble aa = a * a;
                AvxDoubleDouble bb = b * b;
                AvxDoubleDouble abab = a * b; abab = abab + abab;
                a = aa - bb + xs;
                b = abab + ys;
                // lanes still inside the escape radius compare as all-ones
                __m256i cmp = _mm256_castpd_si256(_mm256_cmp_pd(_mm256_add_pd(aa.x[0], bb.x[0]), threshold, _CMP_LE_OQ));
                /*if (info.smooth) {
                    resultsa = _mm256_or_pd(_mm256_andnot_ps(cmp, resultsa), _mm256_and_ps(cmp, a));
                    resultsb = _mm256_or_ps(_mm256_andnot_ps(cmp, resultsb), _mm256_and_ps(cmp, b));
                }*/
                // stop counting lanes that have escaped; break once all four have
                adder = _mm256_and_pd(adder, _mm256_castsi256_pd(cmp));
                counter = _mm256_add_pd(counter, adder);
                if (_mm256_testz_si256(cmp, cmp) != 0) {
                    break;
                }
            }

            auto alignVec = [](double* data) -> double* {
                void* aligned = data;
                ::size_t length = 64;
                std::align(32, 4 * sizeof(double), aligned, length);
                return static_cast<double*>(aligned);
            };

            double resData[8];
            double* ftRes = alignVec(resData);
            _mm256_store_pd(ftRes, counter);
            for (int k = 0; k < 4 && i + k < info.bWidth; k++)
                data[i + k + j * info.bWidth] = ftRes[k] > 0 ? float(ftRes[k]) : info.maxIter;
        }
    }
    return;
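
    // Everything below is unreachable because of the return above: an
    // alternative version of the same loop that computes each x offset
    // independently and tests for escape only every eighth iteration.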
    for (long j = 0; j < info.bHeight; j++) {
        T y = viewy + T(double(j)) * hpp;
        __m256d y0s = { y.x[0], y.x[0], y.x[0], y.x[0] };
        __m256d y1s = { y.x[1], y.x[1], y.x[1], y.x[1] };
        AvxDoubleDouble ys{ y0s, y1s };
        long i = 0;
        for (; i < info.bWidth; i += 4) {
            T x1 = viewx + T(double(i)) * wpp;
            T x2 = viewx + T(double(i + 1)) * wpp;
            T x3 = viewx + T(double(i + 2)) * wpp;
            T x4 = viewx + T(double(i + 3)) * wpp;
            __m256d x0s = {
                x1.x[0], x2.x[0], x3.x[0], x4.x[0],
            };
            __m256d x1s = {
                x1.x[1], x2.x[1], x3.x[1], x4.x[1],
            };
            AvxDoubleDouble xs{ x0s, x1s };

            int itRes[4] = { 0, 0, 0, 0 };
            __m256d threshold = { 16.0, 16.0, 16.0, 16.0 };
            __m256d counter = { 0, 0, 0, 0 };
            __m256d adder = { 1, 1, 1, 1 };

            AvxDoubleDouble a = xs;
            AvxDoubleDouble b = ys;
            for (int k = 0; k < info.maxIter; k++) {
                AvxDoubleDouble aa = a * a;
                AvxDoubleDouble bb = b * b;
                AvxDoubleDouble abab = a * b; abab = abab + abab;
                a = aa - bb + xs;
                b = abab + ys;
                __m256i cmp = _mm256_castpd_si256(_mm256_cmp_pd(_mm256_add_pd(aa.x[0], bb.x[0]), threshold, _CMP_LE_OQ));
                /*if (info.smooth) {
                    resultsa = _mm256_or_pd(_mm256_andnot_ps(cmp, resultsa), _mm256_and_ps(cmp, a));
                    resultsb = _mm256_or_ps(_mm256_andnot_ps(cmp, resultsb), _mm256_and_ps(cmp, b));
                }*/
                adder = _mm256_and_pd(adder, _mm256_castsi256_pd(cmp));
                counter = _mm256_add_pd(counter, adder);
                if ((k & 7) == 0 && _mm256_testz_si256(cmp, cmp) != 0) {
                    break;
                }
            }

            auto alignVec = [](double* data) -> double* {
                void* aligned = data;
                ::size_t length = 64;
                std::align(32, 4 * sizeof(double), aligned, length);
                return static_cast<double*>(aligned);
            };

            double resData[8];
            double* ftRes = alignVec(resData);
            _mm256_store_pd(ftRes, counter);
            for (int k = 0; k < 4 && i + k < info.bWidth; k++)
                data[i + k + j * info.bWidth] = ftRes[k] > 0 ? float(ftRes[k]) : info.maxIter;
        }
    }
}