CpuGeneratorsSSE2.cpp

#include "CpuGenerators.h"

#include <immintrin.h>
#include <omp.h>

using mnd::CpuGenerator;
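
// Explicit instantiations: all four <parallel, smooth> combinations for
// float and double. Note that the smooth flag is carried through the
// template arguments but not acted on by the SSE2 kernels below.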
template class CpuGenerator<float, mnd::X86_SSE2, false, false>;
template class CpuGenerator<float, mnd::X86_SSE2, false, true>;
template class CpuGenerator<float, mnd::X86_SSE2, true, false>;
template class CpuGenerator<float, mnd::X86_SSE2, true, true>;

template class CpuGenerator<double, mnd::X86_SSE2, false, false>;
template class CpuGenerator<double, mnd::X86_SSE2, false, true>;
template class CpuGenerator<double, mnd::X86_SSE2, true, false>;
template class CpuGenerator<double, mnd::X86_SSE2, true, true>;
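
// Single-precision kernel: each __m128 holds four horizontally adjacent
// pixels, so the escape-time iteration z <- z^2 + c advances four points
// of the complex plane per step.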
template<bool parallel, bool smooth>
void CpuGenerator<float, mnd::X86_SSE2, parallel, smooth>::generate(const mnd::MandelInfo& info, float* data)
{
    using T = float;
    const MandelViewport& view = info.view;

    if constexpr (parallel)
        omp_set_num_threads(2 * omp_get_num_procs());
#pragma omp parallel for if (parallel)
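    // Rows are distributed across OpenMP threads; the thread count is
    // oversubscribed (2x the logical cores), presumably to even out the
    // load between cheap and expensive rows.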
    for (long j = 0; j < info.bHeight; j++) {
        T y = T(view.y) + T(j) * T(view.height / info.bHeight);
        long i = 0;
        for (; i < info.bWidth; i += 4) {
            // Real parts of four consecutive pixels, computed in double
            // and then narrowed to float.
            __m128 xs = _mm_setr_ps(
                float(view.x + double(i) * view.width / info.bWidth),
                float(view.x + double(i + 1) * view.width / info.bWidth),
                float(view.x + double(i + 2) * view.width / info.bWidth),
                float(view.x + double(i + 3) * view.width / info.bWidth)
            );
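
            // Per-lane iteration state: counter accumulates the escape count;
            // adder starts at 1.0 in every lane and is zeroed for a lane once
            // that point escapes, freezing its count.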
            __m128 counter = _mm_setzero_ps();
            __m128 adder = _mm_set1_ps(1.0f);
            __m128 threshold = _mm_set1_ps(16.0f);

            __m128 ys = _mm_set1_ps(y);
            __m128 a = xs;
            __m128 b = ys;
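
            // z <- z^2 + c in components: (a + bi)^2 = (a^2 - b^2) + 2ab*i.
            // A lane keeps iterating while a^2 + b^2 <= 16; the comparison
            // mask ANDed into adder stops escaped lanes from counting.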
            for (int k = 0; k < info.maxIter; k++) {
                __m128 aa = _mm_mul_ps(a, a);
                __m128 bb = _mm_mul_ps(b, b);
                __m128 abab = _mm_mul_ps(a, b);
                abab = _mm_add_ps(abab, abab);
                a = _mm_add_ps(_mm_sub_ps(aa, bb), xs);
                b = _mm_add_ps(abab, ys);
                __m128 cmp = _mm_cmple_ps(_mm_add_ps(aa, bb), threshold);
                adder = _mm_and_ps(adder, cmp);
                counter = _mm_add_ps(counter, adder);
                // Stop early once every lane has escaped.
                if (_mm_movemask_ps(cmp) == 0) {
                    break;
                }
            }
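
            // Spill the four counters and write them out, guarding against
            // running past the row on the last, possibly partial vector.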
            // _mm_store_ps requires a 16-byte-aligned destination.
            alignas(16) float ftRes[4];
            _mm_store_ps(ftRes, counter);

            for (int k = 0; k < 4 && i + k < info.bWidth; k++)
                data[i + k + j * info.bWidth] = ftRes[k] > 0 ? ftRes[k] : info.maxIter;
        }
    }
}
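
// Double-precision kernel: the same algorithm, but a __m128d holds only two
// lanes, so each step advances two pixels. Results are still written into the
// caller's float buffer.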
template<bool parallel, bool smooth>
void CpuGenerator<double, mnd::X86_SSE2, parallel, smooth>::generate(const mnd::MandelInfo& info, float* data)
{
    using T = double;
    const MandelViewport& view = info.view;

    if constexpr (parallel)
        omp_set_num_threads(2 * omp_get_num_procs());
#pragma omp parallel for if (parallel)
    for (long j = 0; j < info.bHeight; j++) {
        T y = T(view.y) + T(j) * T(view.height / info.bHeight);
        long i = 0;
        for (; i < info.bWidth; i += 2) {
            __m128d xs = _mm_setr_pd(
                view.x + double(i) * view.width / info.bWidth,
                view.x + double(i + 1) * view.width / info.bWidth
            );
            __m128d counter = _mm_setzero_pd();
            __m128d adder = _mm_set1_pd(1.0);
            __m128d threshold = _mm_set1_pd(16.0);

            __m128d ys = _mm_set1_pd(y);
            __m128d a = xs;
            __m128d b = ys;
            for (int k = 0; k < info.maxIter; k++) {
                __m128d aa = _mm_mul_pd(a, a);
                __m128d bb = _mm_mul_pd(b, b);
                __m128d abab = _mm_mul_pd(a, b);
                abab = _mm_add_pd(abab, abab);
                a = _mm_add_pd(_mm_sub_pd(aa, bb), xs);
                b = _mm_add_pd(abab, ys);
                __m128d cmp = _mm_cmple_pd(_mm_add_pd(aa, bb), threshold);
                adder = _mm_and_pd(adder, cmp);
                counter = _mm_add_pd(counter, adder);
                // Stop early once both lanes have escaped.
                if (_mm_movemask_pd(cmp) == 0) {
                    break;
                }
            }
            // _mm_store_pd requires a 16-byte-aligned destination.
            alignas(16) double ftRes[2];
            _mm_store_pd(ftRes, counter);

            for (int k = 0; k < 2 && i + k < info.bWidth; k++)
                data[i + k + j * info.bWidth] = float(ftRes[k] > 0 ? ftRes[k] : info.maxIter);
        }
    }
}
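
// A minimal usage sketch, an assumption rather than part of this file: it
// presumes MandelInfo/MandelViewport expose the fields accessed above and
// that CpuGenerator is default-constructible; the real declarations live
// in CpuGenerators.h:
//
//     mnd::MandelInfo info;
//     info.view = MandelViewport{ -2.25, -1.5, 3.0, 3.0 };  // x, y, width, height
//     info.bWidth = 1024;
//     info.bHeight = 1024;
//     info.maxIter = 500;
//
//     std::vector<float> iterations(info.bWidth * info.bHeight);
//     CpuGenerator<float, mnd::X86_SSE2, /*parallel=*/true, /*smooth=*/false> gen;
//     gen.generate(info, iterations.data());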