// CpuGeneratorsSSE2.cpp — SSE2 CPU Mandelbrot generators (float and double lanes).
#include "CpuGenerators.h"

#include <immintrin.h>
#include <omp.h>

#include <memory>

using mnd::CpuGeneratorSse2Float;
using mnd::CpuGeneratorSse2Double;
  7. void CpuGeneratorSse2Float::generate(const mnd::MandelInfo& info, float* data)
  8. {
  9. using T = float;
  10. const MandelViewport& view = info.view;
  11. omp_set_num_threads(2 * omp_get_num_procs());
  12. #pragma omp parallel for
  13. for (long j = 0; j < info.bHeight; j++) {
  14. T y = T(view.y) + T(j) * T(view.height / info.bHeight);
  15. long i = 0;
  16. for (i; i < info.bWidth; i += 4) {
  17. __m128 xs = {
  18. float(view.x + double(i) * view.width / info.bWidth),
  19. float(view.x + double(i + 1) * view.width / info.bWidth),
  20. float(view.x + double(i + 2) * view.width / info.bWidth),
  21. float(view.x + double(i + 3) * view.width / info.bWidth)
  22. };
  23. __m128 counter = {0, 0, 0, 0};
  24. __m128 adder = {1, 1, 1, 1};
  25. __m128 threshold = {16.0f, 16.0f, 16.0f, 16.0f};
  26. __m128 ys = {y, y, y, y};
  27. __m128 a = xs;
  28. __m128 b = ys;
  29. for (int k = 0; k < info.maxIter; k++) {
  30. __m128 aa = _mm_mul_ps(a, a);
  31. __m128 bb = _mm_mul_ps(b, b);
  32. __m128 abab = _mm_mul_ps(a, b); abab = _mm_add_ps(abab, abab);
  33. a = _mm_add_ps(_mm_sub_ps(aa, bb), xs);
  34. b = _mm_add_ps(abab, ys);
  35. __m128 cmp = _mm_cmple_ps(_mm_add_ps(aa, bb), threshold);
  36. adder = _mm_and_ps(adder, cmp);
  37. counter = _mm_add_ps(counter, adder);
  38. if (_mm_movemask_epi8(_mm_castps_si128(cmp)) == 0) {
  39. break;
  40. }
  41. }
  42. auto alignVec = [](float* data) -> float* {
  43. void* aligned = data;
  44. ::size_t length = 64;
  45. std::align(32, 8 * sizeof(float), aligned, length);
  46. return static_cast<float*>(aligned);
  47. };
  48. float resData[16];
  49. float* ftRes = alignVec(resData);
  50. _mm_store_ps(ftRes, counter);
  51. for (int k = 0; k < 4 && i + k < info.bWidth; k++)
  52. data[i + k + j * info.bWidth] = ftRes[k] > 0 ? ftRes[k] : info.maxIter;
  53. }
  54. }
  55. }
  56. void CpuGeneratorSse2Double::generate(const mnd::MandelInfo& info, float* data)
  57. {
  58. using T = double;
  59. const MandelViewport& view = info.view;
  60. omp_set_num_threads(2 * omp_get_num_procs());
  61. #pragma omp parallel for
  62. for (long j = 0; j < info.bHeight; j++) {
  63. T y = T(view.y) + T(j) * T(view.height / info.bHeight);
  64. long i = 0;
  65. for (i; i < info.bWidth; i += 2) {
  66. __m128d xs = {
  67. double(view.x + double(i) * view.width / info.bWidth),
  68. double(view.x + double(i + 1) * view.width / info.bWidth)
  69. };
  70. __m128d counter = {0, 0};
  71. __m128d adder = {1, 1};
  72. __m128d threshold = {16.0f, 16.0f};
  73. __m128d ys = {y, y};
  74. __m128d a = xs;
  75. __m128d b = ys;
  76. for (int k = 0; k < info.maxIter; k++) {
  77. __m128d aa = _mm_mul_pd(a, a);
  78. __m128d bb = _mm_mul_pd(b, b);
  79. __m128d abab = _mm_mul_pd(a, b); abab = _mm_add_pd(abab, abab);
  80. a = _mm_add_pd(_mm_sub_pd(aa, bb), xs);
  81. b = _mm_add_pd(abab, ys);
  82. __m128d cmp = _mm_cmple_pd(_mm_add_pd(aa, bb), threshold);
  83. adder = _mm_and_pd(adder, cmp);
  84. counter = _mm_add_pd(counter, adder);
  85. if (_mm_movemask_epi8(_mm_castpd_si128(cmp)) == 0) {
  86. break;
  87. }
  88. }
  89. auto alignVec = [](double* data) -> double* {
  90. void* aligned = data;
  91. ::size_t length = 64;
  92. std::align(32, 4 * sizeof(double), aligned, length);
  93. return static_cast<double*>(aligned);
  94. };
  95. double resData[8];
  96. double* ftRes = alignVec(resData);
  97. _mm_store_pd(ftRes, counter);
  98. for (int k = 0; k < 2 && i + k < info.bWidth; k++)
  99. data[i + k + j * info.bWidth] = ftRes[k] > 0 ? ftRes[k] : info.maxIter;
  100. }
  101. }
  102. }