// CpuGeneratorsSSE2.cpp

#include "CpuGenerators.h"

#include <immintrin.h>
#include <omp.h>

#include <cstddef>
#include <memory>

using mnd::CpuGenerator;

namespace mnd
{
    template class CpuGenerator<float, mnd::X86_SSE2, false, false>;
    template class CpuGenerator<float, mnd::X86_SSE2, false, true>;
    template class CpuGenerator<float, mnd::X86_SSE2, true, false>;
    template class CpuGenerator<float, mnd::X86_SSE2, true, true>;

    template class CpuGenerator<double, mnd::X86_SSE2, false, false>;
    template class CpuGenerator<double, mnd::X86_SSE2, false, true>;
    template class CpuGenerator<double, mnd::X86_SSE2, true, false>;
    template class CpuGenerator<double, mnd::X86_SSE2, true, true>;
}
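
// Both specializations below run the classic escape-time iteration
// z -> z^2 + c on several pixels at once: the float path packs four
// adjacent pixels into one __m128, the double path packs two into one
// __m128d. A per-lane "adder" of ones is masked to zero once a lane's
// |z|^2 exceeds the bailout threshold, so "counter" accumulates each
// pixel's iteration count independently of its neighbors.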
template<bool parallel, bool smooth>
void CpuGenerator<float, mnd::X86_SSE2, parallel, smooth>::generate(const mnd::MandelInfo& info, float* data)
{
    using T = float;
    const MandelViewport& view = info.view;

    if constexpr (parallel)
        omp_set_num_threads(2 * omp_get_num_procs());
#pragma omp parallel for if (parallel)
    for (long j = 0; j < info.bHeight; j++) {
        T y = T(view.y) + T(j) * T(view.height / info.bHeight);
        for (long i = 0; i < info.bWidth; i += 4) {
            // real parts of four adjacent pixels
            __m128 xs = {
                float(view.x + double(i) * view.width / info.bWidth),
                float(view.x + double(i + 1) * view.width / info.bWidth),
                float(view.x + double(i + 2) * view.width / info.bWidth),
                float(view.x + double(i + 3) * view.width / info.bWidth)
            };

            __m128 counter = { 0, 0, 0, 0 };
            __m128 adder = { 1, 1, 1, 1 };
            __m128 threshold = { 16.0f, 16.0f, 16.0f, 16.0f };

            __m128 ys = { y, y, y, y };
            __m128 a = xs;
            __m128 b = ys;

            for (int k = 0; k < info.maxIter; k++) {
                __m128 aa = _mm_mul_ps(a, a);
                __m128 bb = _mm_mul_ps(b, b);
                __m128 abab = _mm_mul_ps(a, b); abab = _mm_add_ps(abab, abab);
                a = _mm_add_ps(_mm_sub_ps(aa, bb), xs);
                b = _mm_add_ps(abab, ys);
                // lanes still inside the bailout radius keep a 1 in adder
                // and continue counting; escaped lanes are masked to 0
                __m128 cmp = _mm_cmple_ps(_mm_add_ps(aa, bb), threshold);
                adder = _mm_and_ps(adder, cmp);
                counter = _mm_add_ps(counter, adder);
                // stop early once every lane has escaped
                if (_mm_movemask_epi8(_mm_castps_si128(cmp)) == 0) {
                    break;
                }
            }

            auto alignVec = [](float* data) -> float* {
                void* aligned = data;
                std::size_t length = 64;
                std::align(32, 8 * sizeof(float), aligned, length);
                return static_cast<float*>(aligned);
            };

            float resData[16];
            float* ftRes = alignVec(resData);
            _mm_store_ps(ftRes, counter);
            for (int k = 0; k < 4 && i + k < info.bWidth; k++)
                data[i + k + j * info.bWidth] = ftRes[k] > 0 ? ftRes[k] : info.maxIter;
        }
    }
}
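
// The double-precision specialization mirrors the float path but packs
// only two pixels per __m128d register. Note that it still writes its
// iteration counts into a float buffer, so callers receive float data
// at either precision.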
template<bool parallel, bool smooth>
void CpuGenerator<double, mnd::X86_SSE2, parallel, smooth>::generate(const mnd::MandelInfo& info, float* data)
{
    using T = double;
    const MandelViewport& view = info.view;

    if constexpr (parallel)
        omp_set_num_threads(2 * omp_get_num_procs());
#pragma omp parallel for if (parallel)
    for (long j = 0; j < info.bHeight; j++) {
        T y = T(view.y) + T(j) * T(view.height / info.bHeight);
        for (long i = 0; i < info.bWidth; i += 2) {
            // real parts of two adjacent pixels
            __m128d xs = {
                double(view.x + double(i) * view.width / info.bWidth),
                double(view.x + double(i + 1) * view.width / info.bWidth)
            };

            __m128d counter = { 0, 0 };
            __m128d adder = { 1, 1 };
            __m128d threshold = { 16.0, 16.0 };

            __m128d ys = { y, y };
            __m128d a = xs;
            __m128d b = ys;

            for (int k = 0; k < info.maxIter; k++) {
                __m128d aa = _mm_mul_pd(a, a);
                __m128d bb = _mm_mul_pd(b, b);
                __m128d abab = _mm_mul_pd(a, b); abab = _mm_add_pd(abab, abab);
                a = _mm_add_pd(_mm_sub_pd(aa, bb), xs);
                b = _mm_add_pd(abab, ys);
                __m128d cmp = _mm_cmple_pd(_mm_add_pd(aa, bb), threshold);
                adder = _mm_and_pd(adder, cmp);
                counter = _mm_add_pd(counter, adder);
                // stop early once both lanes have escaped
                if (_mm_movemask_epi8(_mm_castpd_si128(cmp)) == 0) {
                    break;
                }
            }

            auto alignVec = [](double* data) -> double* {
                void* aligned = data;
                std::size_t length = 64;
                std::align(32, 4 * sizeof(double), aligned, length);
                return static_cast<double*>(aligned);
            };

            double resData[8];
            double* ftRes = alignVec(resData);
            _mm_store_pd(ftRes, counter);
            for (int k = 0; k < 2 && i + k < info.bWidth; k++)
                data[i + k + j * info.bWidth] = ftRes[k] > 0 ? float(ftRes[k]) : info.maxIter;
        }
    }
}
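
// Usage sketch (hypothetical, not part of this translation unit): assuming
// mnd::MandelInfo exposes `view`, `bWidth`, `bHeight`, `maxIter` and
// MandelViewport exposes `x`, `y`, `width`, `height` as used above (the
// aggregate field order below is an assumption), a caller might drive the
// parallel float generator roughly like this:
//
//     #include "CpuGenerators.h"
//     #include <vector>
//
//     void renderExample()
//     {
//         mnd::MandelInfo info;
//         info.view = mnd::MandelViewport{ -2.0, -1.5, 3.0, 3.0 }; // x, y, width, height (assumed layout)
//         info.bWidth = 1024;
//         info.bHeight = 1024;
//         info.maxIter = 500;
//
//         std::vector<float> iterationCounts(info.bWidth * info.bHeight);
//         mnd::CpuGenerator<float, mnd::X86_SSE2, true, false> gen;
//         gen.generate(info, iterationCounts.data());
//     }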