// CpuGeneratorsSSE2.cpp — SSE2-vectorized CPU Mandelbrot/Julia generators
// (float and double, serial and OpenMP-parallel variants).
  1. #include "CpuGenerators.h"
  2. #include <immintrin.h>
  3. #include <omp.h>
  4. #include <memory>
  5. using mnd::CpuGenerator;
namespace mnd
{
// Explicit instantiations of the two generate() specializations defined
// below, for both scalar types and both threading modes, so their code is
// emitted in this translation unit.
// NOTE(review): these instantiation directives precede the member-function
// definitions later in this file; the compiler completes them from those
// definitions within the same translation unit.
template class CpuGenerator<float, mnd::X86_SSE2, false>;
template class CpuGenerator<float, mnd::X86_SSE2, true>;
template class CpuGenerator<double, mnd::X86_SSE2, false>;
template class CpuGenerator<double, mnd::X86_SSE2, true>;
}
  13. template<bool parallel>
  14. void CpuGenerator<float, mnd::X86_SSE2, parallel>::generate(const mnd::MandelInfo& info, float* data)
  15. {
  16. using T = float;
  17. const MandelViewport& view = info.view;
  18. const float dppf = float(view.width / info.bWidth);
  19. const float viewxf = float(view.x);
  20. __m128 viewx = { viewxf, viewxf, viewxf, viewxf };
  21. __m128 dpp = { dppf, dppf, dppf, dppf };
  22. T jX = mnd::convert<T>(info.juliaX);
  23. T jY = mnd::convert<T>(info.juliaY);
  24. __m128 juliaX = { jX, jX, jX, jX };
  25. __m128 juliaY = { jY, jY, jY, jY };
  26. #if defined(_OPENMP)
  27. if constexpr(parallel)
  28. omp_set_num_threads(omp_get_num_procs());
  29. # pragma omp parallel for schedule(static, 1) if (parallel)
  30. #endif
  31. for (long j = 0; j < info.bHeight; j++) {
  32. T y = T(view.y) + T(j) * T(view.height / info.bHeight);
  33. __m128 ys = {y, y, y, y};
  34. for (long i = 0; i < info.bWidth; i += 8) {
  35. __m128 pixc = { float(i), float(i + 1), float(i + 2), float(i + 3) };
  36. __m128 pixc2 = { float(i + 4), float(i + 5), float(i + 6), float(i + 7) };
  37. __m128 xs = _mm_add_ps(_mm_mul_ps(dpp, pixc), viewx);
  38. __m128 xs2 = _mm_add_ps(_mm_mul_ps(dpp, pixc2), viewx);
  39. __m128 counter = { 0, 0, 0, 0 };
  40. __m128 adder = { 1, 1, 1, 1 };
  41. __m128 counter2 = { 0, 0, 0, 0 };
  42. __m128 adder2 = { 1, 1, 1, 1 };
  43. __m128 threshold = { 16.0f, 16.0f, 16.0f, 16.0f };
  44. __m128 a = xs;
  45. __m128 b = ys;
  46. __m128 a2 = xs2;
  47. __m128 b2 = ys;
  48. __m128 cx = xs;
  49. __m128 cy = ys;
  50. __m128 cx2 = xs2;
  51. if (info.julia) {
  52. cx = juliaX;
  53. cx2 = juliaX;
  54. cy = juliaY;
  55. }
  56. __m128 resulta = { 0, 0, 0, 0 };
  57. __m128 resultb = { 0, 0, 0, 0 };
  58. __m128 resulta2 = { 0, 0, 0, 0 };
  59. __m128 resultb2 = { 0, 0, 0, 0 };
  60. __m128 cmp = _mm_cmple_ps(threshold, threshold);
  61. __m128 cmp2 = _mm_cmple_ps(threshold, threshold);
  62. for (int k = 0; k < info.maxIter; k++) {
  63. __m128 aa = _mm_mul_ps(a, a);
  64. __m128 aa2 = _mm_mul_ps(a2, a2);
  65. __m128 bb = _mm_mul_ps(b, b);
  66. __m128 bb2 = _mm_mul_ps(b2, b2);
  67. __m128 abab = _mm_mul_ps(a, b); abab = _mm_add_ps(abab, abab);
  68. __m128 abab2 = _mm_mul_ps(a2, b2); abab2 = _mm_add_ps(abab2, abab2);
  69. a = _mm_add_ps(_mm_sub_ps(aa, bb), cx);
  70. b = _mm_add_ps(abab, cy);
  71. a2 = _mm_add_ps(_mm_sub_ps(aa2, bb2), cx2);
  72. b2 = _mm_add_ps(abab2, cy);
  73. if (info.smooth) {
  74. resulta = _mm_or_ps(_mm_andnot_ps(cmp, resulta), _mm_and_ps(cmp, a));
  75. resultb = _mm_or_ps(_mm_andnot_ps(cmp, resultb), _mm_and_ps(cmp, b));
  76. resulta2 = _mm_or_ps(_mm_andnot_ps(cmp2, resulta2), _mm_and_ps(cmp2, a2));
  77. resultb2 = _mm_or_ps(_mm_andnot_ps(cmp2, resultb2), _mm_and_ps(cmp2, b2));
  78. }
  79. cmp = _mm_cmple_ps(_mm_add_ps(aa, bb), threshold);
  80. cmp2 = _mm_cmple_ps(_mm_add_ps(aa2, bb2), threshold);
  81. adder = _mm_and_ps(adder, cmp);
  82. counter = _mm_add_ps(counter, adder);
  83. adder2 = _mm_and_ps(adder2, cmp2);
  84. counter2 = _mm_add_ps(counter2, adder2);
  85. if ((k & 0x7) == 0 && _mm_movemask_epi8(_mm_castps_si128(cmp)) == 0 &&
  86. _mm_movemask_epi8(_mm_castps_si128(cmp2)) == 0) {
  87. break;
  88. }
  89. }
  90. auto alignVec = [](float* data) -> float* {
  91. void* aligned = data;
  92. ::size_t length = 64;
  93. std::align(32, 8 * sizeof(float), aligned, length);
  94. return static_cast<float*>(aligned);
  95. };
  96. float resData[64];
  97. float* ftRes = alignVec(resData);
  98. float* resa = ftRes + 8;
  99. float* resb = ftRes + 16;
  100. _mm_store_ps(ftRes, counter);
  101. _mm_store_ps(ftRes + 4, counter2);
  102. _mm_store_ps(resa, resulta);
  103. _mm_store_ps(resa + 4, resulta2);
  104. _mm_store_ps(resb, resultb);
  105. _mm_store_ps(resb + 4, resultb2);
  106. for (int k = 0; k < 8 && i + k < info.bWidth; k++) {
  107. if (info.smooth)
  108. data[i + k + j * info.bWidth] = ftRes[k] < 0 ? info.maxIter :
  109. ftRes[k] >= info.maxIter ? info.maxIter :
  110. ((float)ftRes[k]) + 1 - ::logf(::logf(resa[k] * resa[k] + resb[k] * resb[k]) / 2) / ::logf(2.0f);
  111. else
  112. data[i + k + j * info.bWidth] = ftRes[k] >= 0 ? float(ftRes[k]) : info.maxIter;
  113. }
  114. }
  115. }
  116. }
  117. template<bool parallel>
  118. void CpuGenerator<double, mnd::X86_SSE2, parallel>::generate(const mnd::MandelInfo& info, float* data)
  119. {
  120. using T = double;
  121. const MandelViewport& view = info.view;
  122. const double dppf = double(view.width / info.bWidth);
  123. const double viewxf = double(view.x);
  124. __m128d viewx = { viewxf, viewxf };
  125. __m128d dpp = { dppf, dppf };
  126. T jX = mnd::convert<T>(info.juliaX);
  127. T jY = mnd::convert<T>(info.juliaY);
  128. __m128d juliaX = { jX, jX };
  129. __m128d juliaY = { jY, jY };
  130. #if defined(_OPENMP)
  131. if constexpr(parallel)
  132. omp_set_num_threads(omp_get_num_procs());
  133. # pragma omp parallel for schedule(static, 1) if (parallel)
  134. #endif
  135. for (long j = 0; j < info.bHeight; j++) {
  136. T y = T(view.y) + T(j) * T(view.height / info.bHeight);
  137. __m128d ys = { y, y };
  138. for (long i = 0; i < info.bWidth; i += 4) {
  139. __m128d pixc = { double(i), double(i + 1) };
  140. __m128d pixc2 = { double(i + 2), double(i + 3) };
  141. __m128d xs = _mm_add_pd(_mm_mul_pd(dpp, pixc), viewx);
  142. __m128d xs2 = _mm_add_pd(_mm_mul_pd(dpp, pixc2), viewx);
  143. __m128d counter = { 0, 0 };
  144. __m128d adder = { 1, 1 };
  145. __m128d counter2 = { 0, 0 };
  146. __m128d adder2 = { 1, 1 };
  147. __m128d threshold = { 16.0f, 16.0f };
  148. __m128d a = xs;
  149. __m128d b = ys;
  150. __m128d a2 = xs2;
  151. __m128d b2 = ys;
  152. __m128d cx = xs;
  153. __m128d cy = ys;
  154. __m128d cx2 = xs2;
  155. if (info.julia) {
  156. cx = juliaX;
  157. cx2 = juliaX;
  158. cy = juliaY;
  159. }
  160. __m128d resulta = { 0, 0 };
  161. __m128d resultb = { 0, 0 };
  162. __m128d resulta2 = { 0, 0 };
  163. __m128d resultb2 = { 0, 0 };
  164. __m128d cmp = _mm_cmple_pd(threshold, threshold);
  165. __m128d cmp2 = _mm_cmple_pd(threshold, threshold);
  166. for (int k = 0; k < info.maxIter; k++) {
  167. __m128d aa = _mm_mul_pd(a, a);
  168. __m128d aa2 = _mm_mul_pd(a2, a2);
  169. __m128d bb = _mm_mul_pd(b, b);
  170. __m128d bb2 = _mm_mul_pd(b2, b2);
  171. __m128d abab = _mm_mul_pd(a, b); abab = _mm_add_pd(abab, abab);
  172. __m128d abab2 = _mm_mul_pd(a2, b2); abab2 = _mm_add_pd(abab2, abab2);
  173. a = _mm_add_pd(_mm_sub_pd(aa, bb), cx);
  174. b = _mm_add_pd(abab, cy);
  175. a2 = _mm_add_pd(_mm_sub_pd(aa2, bb2), cx2);
  176. b2 = _mm_add_pd(abab2, cy);
  177. if (info.smooth) {
  178. resulta = _mm_or_pd(_mm_andnot_pd(cmp, resulta), _mm_and_pd(cmp, a));
  179. resultb = _mm_or_pd(_mm_andnot_pd(cmp, resultb), _mm_and_pd(cmp, b));
  180. resulta2 = _mm_or_pd(_mm_andnot_pd(cmp2, resulta2), _mm_and_pd(cmp2, a2));
  181. resultb2 = _mm_or_pd(_mm_andnot_pd(cmp2, resultb2), _mm_and_pd(cmp2, b2));
  182. }
  183. cmp = _mm_cmple_pd(_mm_add_pd(aa, bb), threshold);
  184. cmp2 = _mm_cmple_pd(_mm_add_pd(aa2, bb2), threshold);
  185. adder = _mm_and_pd(adder, cmp);
  186. counter = _mm_add_pd(counter, adder);
  187. adder2 = _mm_and_pd(adder2, cmp2);
  188. counter2 = _mm_add_pd(counter2, adder2);
  189. if (((k & 0x7) == 0) && _mm_movemask_epi8(_mm_castpd_si128(cmp)) == 0 &&
  190. _mm_movemask_epi8(_mm_castpd_si128(cmp2)) == 0) {
  191. break;
  192. }
  193. }
  194. double ftRes[24];
  195. double* resa = ftRes + 4;
  196. double* resb = ftRes + 8;
  197. _mm_storeu_pd(ftRes, counter);
  198. _mm_storeu_pd(ftRes + 2, counter2);
  199. _mm_storeu_pd(resa, resulta);
  200. _mm_storeu_pd(resa + 2, resulta2);
  201. _mm_storeu_pd(resb, resultb);
  202. _mm_storeu_pd(resb + 2, resultb2);
  203. for (int k = 0; k < 4 && i + k < info.bWidth; k++) {
  204. if (info.smooth)
  205. data[i + k + j * info.bWidth] = ftRes[k] < 0 ? info.maxIter :
  206. ftRes[k] >= info.maxIter ? info.maxIter :
  207. ((float)ftRes[k]) + 1 - ::logf(::logf(resa[k] * resa[k] + resb[k] * resb[k]) / 2) / ::logf(2.0f);
  208. else
  209. data[i + k + j * info.bWidth] = ftRes[k] >= 0 ? float(ftRes[k]) : info.maxIter;
  210. }
  211. }
  212. }
  213. }