CpuGeneratorsNeon.cpp

#include "CpuGenerators.h"
#include "LightDoubleDouble.h"

#include <omp.h>
#include <arm_neon.h>
#include <memory>

using mnd::CpuGenerator;

namespace mnd
{
    template class CpuGenerator<float, mnd::ARM_NEON, false>;
    template class CpuGenerator<float, mnd::ARM_NEON, true>;
    template class CpuGenerator<double, mnd::ARM_NEON, false>;
    template class CpuGenerator<double, mnd::ARM_NEON, true>;
    template class CpuGenerator<mnd::DoubleDouble, mnd::ARM_NEON, false>;
    template class CpuGenerator<mnd::DoubleDouble, mnd::ARM_NEON, true>;
}
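
// Explicit instantiation definitions: they force the compiler to emit both
// the sequential and the parallel variant of each generator in this
// translation unit, so no other file needs to see the template bodies.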

template<bool parallel>
void CpuGenerator<float, mnd::ARM_NEON, parallel>::generate(const mnd::MandelInfo& info, float* data)
{
    using T = float;
    const MandelViewport& view = info.view;

    float32x4_t juliaX = vmovq_n_f32(float(info.juliaX));
    float32x4_t juliaY = vmovq_n_f32(float(info.juliaY));
#if defined(_OPENMP)
    if constexpr(parallel)
        omp_set_num_threads(omp_get_num_procs());
#   pragma omp parallel for schedule(static, 1) if (parallel)
#endif
    for (long j = 0; j < info.bHeight; j++) {
        T y = T(view.y) + T(j) * T(view.height / info.bHeight);
        long i = 0;
        for (; i < info.bWidth; i += 4) {
            float xsvals[] = {
                float(view.x + float(i) * view.width / info.bWidth),
                float(view.x + float(i + 1) * view.width / info.bWidth),
                float(view.x + float(i + 2) * view.width / info.bWidth),
                float(view.x + float(i + 3) * view.width / info.bWidth)
            };

            float32x4_t xs = vld1q_f32(xsvals);
            uint32x4_t counter = vmovq_n_u32(0);
            uint32x4_t adder = vmovq_n_u32(1);
            float32x4_t threshold = vmovq_n_f32(16);

            float32x4_t ys = vmovq_n_f32(y);
            float32x4_t a = xs;
            float32x4_t b = ys;
            float32x4_t cx = info.julia ? juliaX : xs;
            float32x4_t cy = info.julia ? juliaY : ys;
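            // in Julia mode the constant c stays fixed for the whole frame;
            // for the Mandelbrot set, c is the coordinate of the pixel itself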
            for (int k = 0; k < info.maxIter; k++) {
                float32x4_t aa = vmulq_f32(a, a);
                float32x4_t bb = vmulq_f32(b, b);
                float32x4_t abab = vmulq_f32(a, b); abab = vaddq_f32(abab, abab);
                // lanes still inside the escape radius (|z|^2 <= 16) are all-ones
                uint32x4_t cmp = vcleq_f32(vaddq_f32(aa, bb), threshold);
                if (info.smooth) {
                    float32x4_t tempa = vaddq_f32(vsubq_f32(aa, bb), cx);
                    float32x4_t tempb = vaddq_f32(abab, cy);
                    // branchless lane select: take the new iterate where the
                    // lane is still bounded, keep the old value where it escaped
                    a = vreinterpretq_f32_u32(vorrq_u32(
                        vandq_u32(cmp, vreinterpretq_u32_f32(tempa)),
                        vandq_u32(vmvnq_u32(cmp), vreinterpretq_u32_f32(a))));
                    b = vreinterpretq_f32_u32(vorrq_u32(
                        vandq_u32(cmp, vreinterpretq_u32_f32(tempb)),
                        vandq_u32(vmvnq_u32(cmp), vreinterpretq_u32_f32(b))));
                }
                else {
                    a = vaddq_f32(vsubq_f32(aa, bb), cx);
                    b = vaddq_f32(abab, cy);
                }
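                // note: on AArch64 the and/or/mvn select above could equally
                // be written with the bitwise-select intrinsic (equivalent
                // sketch, not what this file uses):
                //   a = vbslq_f32(cmp, tempa, a);
                //   b = vbslq_f32(cmp, tempb, b);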
                // once a lane escapes, its adder bit is cleared for good,
                // so its counter stops increasing
                adder = vandq_u32(adder, cmp);
                counter = vaddq_u32(counter, adder);
                // checking the break criterion can be expensive,
                // so only do it every 8 iterations
                if ((k & 0x7) == 0) {
                    /* // ARM-v7 method
                    uint32x2_t allZero = vorr_u32(vget_low_u32(cmp), vget_high_u32(cmp));
                    if (vget_lane_u32(vpmax_u32(allZero, allZero), 0) == 0) {
                        break;
                    }
                    */
                    // AArch64: horizontal add of the mask; zero means every lane escaped
                    uint32_t allZero = vaddvq_u32(cmp);
                    if (allZero == 0) {
                        break;
                    }
                }
            }
            uint32_t resData[4];
            float resa[4];
            float resb[4];
            vst1q_u32(resData, counter);
            vst1q_f32(resa, a);
            vst1q_f32(resb, b);

            // smooth coloring: continuous iteration count n + 1 - log2(log|z_n|)
            for (int k = 0; k < 4 && i + k < info.bWidth; k++) {
                if (info.smooth)
                    data[i + k + j * info.bWidth] = resData[k] == 0 ? info.maxIter :
                        resData[k] >= info.maxIter ? info.maxIter :
                        ((float) resData[k]) + 1 - ::logf(::logf(resa[k] * resa[k] + resb[k] * resb[k]) / 2) / ::logf(2.0f);
                else
                    data[i + k + j * info.bWidth] = resData[k] > 0 ? float(resData[k]) : info.maxIter;
            }
        }
    }
}

template<bool parallel>
void CpuGenerator<double, mnd::ARM_NEON, parallel>::generate(const mnd::MandelInfo& info, float* data)
{
    using T = double;
    const MandelViewport& view = info.view;

    float64x2_t juliaX = vmovq_n_f64(double(info.juliaX));
    float64x2_t juliaY = vmovq_n_f64(double(info.juliaY));
#if defined(_OPENMP)
    if constexpr(parallel)
        omp_set_num_threads(omp_get_num_procs());
#   pragma omp parallel for schedule(static, 1) if (parallel)
#endif
    for (long j = 0; j < info.bHeight; j++) {
        T y = T(view.y) + T(j) * T(view.height / info.bHeight);
        long i = 0;
        for (; i < info.bWidth; i += 2) {
            double xsvals[] = {
                double(view.x + double(i) * view.width / info.bWidth),
                double(view.x + double(i + 1) * view.width / info.bWidth),
            };

            float64x2_t xs = vld1q_f64(xsvals);
            uint64x2_t counter = vmovq_n_u64(0);
            uint64x2_t adder = vmovq_n_u64(1);
            float64x2_t threshold = vmovq_n_f64(16);

            float64x2_t ys = vmovq_n_f64(y);
            float64x2_t a = xs;
            float64x2_t b = ys;
            float64x2_t cx = info.julia ? juliaX : xs;
            float64x2_t cy = info.julia ? juliaY : ys;
            for (int k = 0; k < info.maxIter; k++) {
                float64x2_t aa = vmulq_f64(a, a);
                float64x2_t bb = vmulq_f64(b, b);
                float64x2_t abab = vmulq_f64(a, b); abab = vaddq_f64(abab, abab);
                uint64x2_t cmp = vcleq_f64(vaddq_f64(aa, bb), threshold);
                if (info.smooth) {
                    float64x2_t tempa = vaddq_f64(vsubq_f64(aa, bb), cx);
                    float64x2_t tempb = vaddq_f64(abab, cy);
                    // NEON has no 64-bit vmvn, so the mask is inverted through
                    // a uint32x4_t detour (bitwise NOT is lane-width agnostic)
                    a = vreinterpretq_f64_u64(vorrq_u64(
                        vandq_u64(cmp, vreinterpretq_u64_f64(tempa)),
                        vandq_u64(vreinterpretq_u64_u32(vmvnq_u32(vreinterpretq_u32_u64(cmp))), vreinterpretq_u64_f64(a))));
                    b = vreinterpretq_f64_u64(vorrq_u64(
                        vandq_u64(cmp, vreinterpretq_u64_f64(tempb)),
                        vandq_u64(vreinterpretq_u64_u32(vmvnq_u32(vreinterpretq_u32_u64(cmp))), vreinterpretq_u64_f64(b))));
                }
                else {
                    a = vaddq_f64(vsubq_f64(aa, bb), cx);
                    b = vaddq_f64(abab, cy);
                }
                adder = vandq_u64(adder, cmp);
                counter = vaddq_u64(counter, adder);
                // checking the break criterion can be expensive,
                // so only do it every 8 iterations
                if ((k & 0x7) == 0) {
                    /* // ARM-v7 method
                    uint32x2_t allZero = vorr_u32(vget_low_u32(cmp), vget_high_u32(cmp));
                    if (vget_lane_u32(vpmax_u32(allZero, allZero), 0) == 0) {
                        break;
                    }
                    */
                    uint64_t allZero = vaddvq_u64(cmp);
                    if (allZero == 0) {
                        break;
                    }
                }
            }
            uint64_t resData[2];
            double resa[2];
            double resb[2];
            vst1q_u64(resData, counter);
            vst1q_f64(resa, a);
            vst1q_f64(resb, b);

            for (int k = 0; k < 2 && i + k < info.bWidth; k++) {
                if (info.smooth)
                    data[i + k + j * info.bWidth] = resData[k] == 0 ? info.maxIter :
                        resData[k] >= info.maxIter ? info.maxIter :
                        ((float) resData[k]) + 1 - ::logf(::logf(resa[k] * resa[k] + resb[k] * resb[k]) / 2) / ::logf(2.0f);
                else
                    data[i + k + j * info.bWidth] = resData[k] > 0 ? float(resData[k]) : info.maxIter;
            }
        }
    }
}

struct VecPair
{
    float64x2_t a;
    float64x2_t b;
};
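
// The helpers below are the classic error-free transformations behind
// double-double arithmetic: quickTwoSum/quickTwoDiff are Dekker's fast
// variants (they assume |a| >= |b|), twoSum/twoDiff are Knuth's
// precondition-free versions, and twoProd uses Dekker's split so that
// p + err == a * b holds exactly in each lane.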
static inline VecPair quickTwoSum(float64x2_t a, float64x2_t b)
{
    float64x2_t s = vaddq_f64(a, b);
    float64x2_t e = vsubq_f64(b, vsubq_f64(s, a));
    return { s, e };
}

static inline VecPair quickTwoDiff(float64x2_t a, float64x2_t b)
{
    float64x2_t s = vsubq_f64(a, b);
    float64x2_t e = vsubq_f64(vsubq_f64(a, s), b);
    return { s, e };
}

static inline VecPair twoSum(float64x2_t a, float64x2_t b)
{
    float64x2_t s = vaddq_f64(a, b);
    float64x2_t bb = vsubq_f64(s, a);
    float64x2_t e = vaddq_f64(vsubq_f64(a, vsubq_f64(s, bb)), vsubq_f64(b, bb));
    return { s, e };
}

static inline VecPair twoDiff(float64x2_t a, float64x2_t b)
{
    float64x2_t s = vsubq_f64(a, b);
    float64x2_t bb = vsubq_f64(s, a);
    float64x2_t e = vsubq_f64(vsubq_f64(a, vsubq_f64(s, bb)), vaddq_f64(b, bb));
    return { s, e };
}

static inline VecPair split(float64x2_t a)
{
    /*
    // -- this should never happen when doing mandelbrot calculations,
    // so we omit this check.
    if (a > _QD_SPLIT_THRESH || a < -_QD_SPLIT_THRESH) {
        a *= 3.7252902984619140625e-09; // 2^-28
        temp = _QD_SPLITTER * a;
        hi = temp - (temp - a);
        lo = a - hi;
        hi *= 268435456.0; // 2^28
        lo *= 268435456.0; // 2^28
    } else {
        temp = _QD_SPLITTER * a;
        hi = temp - (temp - a);
        lo = a - hi;
    }
    */
    static const float64x2_t SPLITTER = vmovq_n_f64(134217729.0); // 2^27 + 1
    float64x2_t temp = vmulq_f64(SPLITTER, a);
    float64x2_t hi = vsubq_f64(temp, vsubq_f64(temp, a));
    float64x2_t lo = vsubq_f64(a, hi);
    return { hi, lo };
}
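
// split() is Dekker's splitting: hi receives the upper half of the
// significand (at most 26 bits), lo the rest, and hi + lo == a exactly.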
static inline VecPair twoProd(float64x2_t a, float64x2_t b)
{
    float64x2_t p = vmulq_f64(a, b);
    auto [a_hi, a_lo] = split(a);
    auto [b_hi, b_lo] = split(b);
    float64x2_t err = vaddq_f64(
        vaddq_f64(vsubq_f64(vmulq_f64(a_hi, b_hi), p),
                  vaddq_f64(vmulq_f64(a_hi, b_lo), vmulq_f64(a_lo, b_hi))),
        vmulq_f64(a_lo, b_lo));
    return { p, err };
}
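
// Alternative sketch (not used here): AArch64 guarantees fused multiply-add,
// which yields the same exact error term without any splitting:
//   float64x2_t err = vfmaq_f64(vnegq_f64(p), a, b); // computes a*b - p exactly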

struct NeonDoubleDouble
{
    float64x2_t x[2];

    inline NeonDoubleDouble(const float64x2_t& a, const float64x2_t& b) :
        x{ a, b }
    {}

    inline NeonDoubleDouble(double a, double b) :
        x{ vmovq_n_f64(a), vmovq_n_f64(b) }
    {}

    inline NeonDoubleDouble operator + (const NeonDoubleDouble& sm) const
    {
        auto [s, e] = twoSum(x[0], sm.x[0]);
        e = vaddq_f64(e, vaddq_f64(x[1], sm.x[1]));
        auto [r1, r2] = quickTwoSum(s, e);
        return NeonDoubleDouble{ r1, r2 };
    }

    inline NeonDoubleDouble operator - (const NeonDoubleDouble& sm) const
    {
        auto [s, e] = twoDiff(x[0], sm.x[0]);
        e = vaddq_f64(e, x[1]);
        e = vsubq_f64(e, sm.x[1]);
        auto [r1, r2] = quickTwoSum(s, e);
        return NeonDoubleDouble{ r1, r2 };
    }

    inline NeonDoubleDouble operator * (const NeonDoubleDouble& sm) const
    {
        auto [p1, p2] = twoProd(this->x[0], sm.x[0]);
        p2 = vaddq_f64(p2,
            vaddq_f64(vmulq_f64(sm.x[1], x[0]), vmulq_f64(sm.x[0], x[1])));
        auto [r1, r2] = quickTwoSum(p1, p2);
        return NeonDoubleDouble{ r1, r2 };
    }
};
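
// Layout note: one NeonDoubleDouble holds *two* double-double numbers in
// structure-of-arrays form; x[0] carries both high parts and x[1] both low
// parts, so each lane-wise operation advances two pixels at once.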

template<bool parallel>
void CpuGenerator<mnd::DoubleDouble, mnd::ARM_NEON, parallel>::generate(const mnd::MandelInfo& info, float* data)
{
    const MandelViewport& view = info.view;
    using T = LightDoubleDouble;

    T viewx = mnd::convert<T>(view.x);
    T viewy = mnd::convert<T>(view.y);
    T wpp = mnd::convert<T>(view.width / info.bWidth);
    T hpp = mnd::convert<T>(view.height / info.bHeight);

    T jX = mnd::convert<T>(info.juliaX);
    T jY = mnd::convert<T>(info.juliaY);
    NeonDoubleDouble juliaX = { jX[0], jX[1] };
    NeonDoubleDouble juliaY = { jY[0], jY[1] };
#if defined(_OPENMP)
    if constexpr(parallel)
        omp_set_num_threads(omp_get_num_procs());
#   pragma omp parallel for schedule(static, 1) if (parallel)
#endif
    for (long j = 0; j < info.bHeight; j++) {
        T y = viewy + T(double(j)) * hpp;
        NeonDoubleDouble ys{ y[0], y[1] };
        for (long i = 0; i < info.bWidth; i += 2) {
            T x1 = viewx + T(double(i)) * wpp;
            T x2 = x1 + wpp;
            double xarr1[] = { x1[0], x2[0] };
            double xarr2[] = { x1[1], x2[1] };
            float64x2_t x1s = vld1q_f64(xarr1);
            float64x2_t x2s = vld1q_f64(xarr2);
            NeonDoubleDouble xs{ x1s, x2s };

            NeonDoubleDouble cx = info.julia ? juliaX : xs;
            NeonDoubleDouble cy = info.julia ? juliaY : ys;

            float64x2_t threshold = vmovq_n_f64(16.0);
            uint64x2_t counter = vmovq_n_u64(0);
            uint64x2_t adder = vmovq_n_u64(1);

            NeonDoubleDouble a = xs;
            NeonDoubleDouble b = ys;
            float64x2_t resultA = a.x[0];
            float64x2_t resultB = b.x[0];
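            // resultA/resultB remember the last iterate a lane produced while
            // still inside the escape radius; the smooth-coloring formula at
            // the end needs that value rather than the post-escape one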
            for (int k = 0; k < info.maxIter; k++) {
                NeonDoubleDouble aa = a * a;
                NeonDoubleDouble bb = b * b;
                NeonDoubleDouble abab = a * b; abab = abab + abab;
                a = aa - bb + cx;
                b = abab + cy;
                uint64x2_t cmp = vcleq_f64(vaddq_f64(aa.x[0], bb.x[0]), threshold);
                if (info.smooth) {
                    resultA = vreinterpretq_f64_u64(vorrq_u64(
                        vandq_u64(cmp, vreinterpretq_u64_f64(a.x[0])),
                        vandq_u64(vreinterpretq_u64_u32(vmvnq_u32(vreinterpretq_u32_u64(cmp))), vreinterpretq_u64_f64(resultA))));
                    resultB = vreinterpretq_f64_u64(vorrq_u64(
                        vandq_u64(cmp, vreinterpretq_u64_f64(b.x[0])),
                        vandq_u64(vreinterpretq_u64_u32(vmvnq_u32(vreinterpretq_u32_u64(cmp))), vreinterpretq_u64_f64(resultB))));
                }
                adder = vandq_u64(adder, cmp);
                counter = vaddq_u64(counter, adder);
                // checking the break criterion can be expensive,
                // so only do it every 8 iterations
                if ((k & 0x7) == 0) {
                    /* // ARM-v7 method
                    uint32x2_t allZero = vorr_u32(vget_low_u32(cmp), vget_high_u32(cmp));
                    if (vget_lane_u32(vpmax_u32(allZero, allZero), 0) == 0) {
                        break;
                    }
                    */
                    uint64_t allZero = vaddvq_u64(cmp);
                    if (allZero == 0) {
                        break;
                    }
                }
            }
            uint64_t resData[2];
            double resa[2];
            double resb[2];
            vst1q_u64(resData, counter);
            vst1q_f64(resa, resultA);
            vst1q_f64(resb, resultB);

            for (int k = 0; k < 2 && i + k < info.bWidth; k++) {
                if (info.smooth)
                    data[i + k + j * info.bWidth] = resData[k] == 0 ? info.maxIter :
                        resData[k] >= info.maxIter ? info.maxIter :
                        ((float) resData[k]) + 1 - ::logf(::logf(resa[k] * resa[k] + resb[k] * resb[k]) / 2) / ::logf(2.0f);
                else
                    data[i + k + j * info.bWidth] = resData[k] > 0 ? float(resData[k]) : info.maxIter;
            }
        }
    }
}