#include <freerdp/config.h>

#include <winpr/sysinfo.h>
#include <winpr/crt.h>

#include <freerdp/types.h>
#include <freerdp/primitives.h>

#include "prim_internal.h"
#if defined(NEON_INTRINSICS_ENABLED)
#include <arm_neon.h>

static primitives_t* generic = NULL;
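/* The helpers below implement the fixed-point YUV -> RGB conversion used throughout this
 * file. With C = Y << 8, D = U - 128 and E = V - 128 they compute
 *   R = (C + 403 * E) >> 8
 *   G = (C - 48 * D - 120 * E) >> 8
 *   B = (C + 475 * D) >> 8
 * and saturate the results to the 0..255 range. */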
static INLINE uint8x8_t neon_YUV2R_single(uint16x8_t C, int16x8_t D, int16x8_t E)
{
    /* R = (C + 403 * E) >> 8, computed in two int32x4 halves and saturated to uint8 */
    const int32x4_t Ch = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(C)));
    const int32x4_t e403h = vmull_n_s16(vget_high_s16(E), 403);
    const int32x4_t cehm = vaddq_s32(Ch, e403h);
    const int32x4_t ceh = vshrq_n_s32(cehm, 8);

    const int32x4_t Cl = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(C)));
    const int32x4_t e403l = vmull_n_s16(vget_low_s16(E), 403);
    const int32x4_t celm = vaddq_s32(Cl, e403l);
    const int32x4_t cel = vshrq_n_s32(celm, 8);
    const int16x8_t ce = vcombine_s16(vqmovn_s32(cel), vqmovn_s32(ceh));
    return vqmovun_s16(ce);
}
static INLINE uint8x8x2_t neon_YUV2R(uint16x8x2_t C, int16x8x2_t D, int16x8x2_t E)
{
    uint8x8x2_t res = { { neon_YUV2R_single(C.val[0], D.val[0], E.val[0]),
                          neon_YUV2R_single(C.val[1], D.val[1], E.val[1]) } };
    return res;
}
static INLINE uint8x8_t neon_YUV2G_single(uint16x8_t C, int16x8_t D, int16x8_t E)
{
    /* G = (C - 48 * D - 120 * E) >> 8 */
    const int16x8_t d48 = vmulq_n_s16(D, 48);
    const int16x8_t e120 = vmulq_n_s16(E, 120);
    const int32x4_t deh = vaddl_s16(vget_high_s16(d48), vget_high_s16(e120));
    const int32x4_t Ch = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(C)));
    const int32x4_t cdeh32m = vsubq_s32(Ch, deh);
    const int32x4_t cdeh32 = vshrq_n_s32(cdeh32m, 8);
    const int16x4_t cdeh = vqmovn_s32(cdeh32);

    const int32x4_t del = vaddl_s16(vget_low_s16(d48), vget_low_s16(e120));
    const int32x4_t Cl = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(C)));
    const int32x4_t cdel32m = vsubq_s32(Cl, del);
    const int32x4_t cdel32 = vshrq_n_s32(cdel32m, 8);
    const int16x4_t cdel = vqmovn_s32(cdel32);
    const int16x8_t cde = vcombine_s16(cdel, cdeh);
    return vqmovun_s16(cde);
}
static INLINE uint8x8x2_t neon_YUV2G(uint16x8x2_t C, int16x8x2_t D, int16x8x2_t E)
{
    uint8x8x2_t res = { { neon_YUV2G_single(C.val[0], D.val[0], E.val[0]),
                          neon_YUV2G_single(C.val[1], D.val[1], E.val[1]) } };
    return res;
}
static INLINE uint8x8_t neon_YUV2B_single(uint16x8_t C, int16x8_t D, int16x8_t E)
{
    /* B = (C + 475 * D) >> 8 */
    const int32x4_t Ch = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(C)));
    const int32x4_t d475h = vmull_n_s16(vget_high_s16(D), 475);
    const int32x4_t cdhm = vaddq_s32(Ch, d475h);
    const int32x4_t cdh = vshrq_n_s32(cdhm, 8);

    const int32x4_t Cl = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(C)));
    const int32x4_t d475l = vmull_n_s16(vget_low_s16(D), 475);
    const int32x4_t cdlm = vaddq_s32(Cl, d475l);
    const int32x4_t cdl = vshrq_n_s32(cdlm, 8);
    const int16x8_t cd = vcombine_s16(vqmovn_s32(cdl), vqmovn_s32(cdh));
    return vqmovun_s16(cd);
}
static INLINE uint8x8x2_t neon_YUV2B(uint16x8x2_t C, int16x8x2_t D, int16x8x2_t E)
{
    uint8x8x2_t res = { { neon_YUV2B_single(C.val[0], D.val[0], E.val[0]),
                          neon_YUV2B_single(C.val[1], D.val[1], E.val[1]) } };
    return res;
}
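/* Scatter eight converted pixels into an interleaved 4-byte-per-pixel buffer. The
 * rPos/gPos/bPos arguments select the byte position of each channel within a pixel;
 * the alpha/padding byte (aPos) is left as read by the read-modify-write cycle. */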
static inline void neon_store_bgrx(BYTE* WINPR_RESTRICT pRGB, uint8x8_t r, uint8x8_t g, uint8x8_t b,
                                   uint8_t rPos, uint8_t gPos, uint8_t bPos, uint8_t aPos)
{
    uint8x8x4_t bgrx = vld4_u8(pRGB);
    bgrx.val[rPos] = r;
    bgrx.val[gPos] = g;
    bgrx.val[bPos] = b;
    vst4_u8(pRGB, bgrx);
}
static INLINE void neon_YuvToRgbPixel(BYTE* pRGB, uint8x8x2_t Y, int16x8x2_t D, int16x8x2_t E,
                                      const uint8_t rPos, const uint8_t gPos, const uint8_t bPos,
                                      const uint8_t aPos)
{
    /* C = Y * 256 */
    const uint16x8x2_t C = { { vshlq_n_u16(vmovl_u8(Y.val[0]), 8),
                               vshlq_n_u16(vmovl_u8(Y.val[1]), 8) } };

    const uint8x8x2_t r = neon_YUV2R(C, D, E);
    const uint8x8x2_t g = neon_YUV2G(C, D, E);
    const uint8x8x2_t b = neon_YUV2B(C, D, E);

    neon_store_bgrx(pRGB, r.val[0], g.val[0], b.val[0], rPos, gPos, bPos, aPos);
    neon_store_bgrx(pRGB + sizeof(uint8x8x4_t), r.val[1], g.val[1], b.val[1], rPos, gPos, bPos,
                    aPos);
}
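/* Load 8 subsampled chroma values, center them around zero and duplicate each one so a
 * single U/V pair covers two horizontally adjacent luma samples (4:2:0 upsampling). */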
static inline int16x8x2_t loadUV(const BYTE* WINPR_RESTRICT pV, size_t x)
{
    const uint8x8_t Vraw = vld1_u8(&pV[x / 2]);
    const int16x8_t V = vreinterpretq_s16_u16(vmovl_u8(Vraw));
    const int16x8_t c128 = vdupq_n_s16(128);
    const int16x8_t E = vsubq_s16(V, c128);
    return vzipq_s16(E, E);
}
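/* Scalar fallback used for the trailing pixels the 16-wide SIMD loops do not cover. */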
static INLINE void neon_write_pixel(BYTE* pRGB, BYTE Y, BYTE U, BYTE V, const uint8_t rPos,
                                    const uint8_t gPos, const uint8_t bPos, const uint8_t aPos)
{
    const BYTE r = YUV2R(Y, U, V);
    const BYTE g = YUV2G(Y, U, V);
    const BYTE b = YUV2B(Y, U, V);

    pRGB[rPos] = r;
    pRGB[gPos] = g;
    pRGB[bPos] = b;
}
static INLINE pstatus_t neon_YUV420ToX_DOUBLE_ROW(const BYTE* WINPR_RESTRICT pY[2],
                                                  const BYTE* WINPR_RESTRICT pU,
                                                  const BYTE* WINPR_RESTRICT pV,
                                                  BYTE* WINPR_RESTRICT pRGB[2], size_t width,
                                                  const uint8_t rPos, const uint8_t gPos,
                                                  const uint8_t bPos, const uint8_t aPos)
{
    WINPR_ASSERT((width % 2) == 0);

    size_t x = 0;

    for (; x < width - width % 16; x += 16)
    {
        const uint8x16_t Y0raw = vld1q_u8(&pY[0][x]);
        const uint8x8x2_t Y0 = { { vget_low_u8(Y0raw), vget_high_u8(Y0raw) } };
        const int16x8x2_t D = loadUV(pU, x);
        const int16x8x2_t E = loadUV(pV, x);
        neon_YuvToRgbPixel(&pRGB[0][4ULL * x], Y0, D, E, rPos, gPos, bPos, aPos);

        const uint8x16_t Y1raw = vld1q_u8(&pY[1][x]);
        const uint8x8x2_t Y1 = { { vget_low_u8(Y1raw), vget_high_u8(Y1raw) } };
        neon_YuvToRgbPixel(&pRGB[1][4ULL * x], Y1, D, E, rPos, gPos, bPos, aPos);
    }

    for (; x < width; x += 2)
    {
        const BYTE U = pU[x / 2];
        const BYTE V = pV[x / 2];

        neon_write_pixel(&pRGB[0][4 * x], pY[0][x], U, V, rPos, gPos, bPos, aPos);
        neon_write_pixel(&pRGB[0][4 * (1ULL + x)], pY[0][1ULL + x], U, V, rPos, gPos, bPos, aPos);
        neon_write_pixel(&pRGB[1][4 * x], pY[1][x], U, V, rPos, gPos, bPos, aPos);
        neon_write_pixel(&pRGB[1][4 * (1ULL + x)], pY[1][1ULL + x], U, V, rPos, gPos, bPos, aPos);
    }

    return PRIMITIVES_SUCCESS;
}
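/* Convert a full 4:2:0 frame: rows are processed in pairs so each chroma row is loaded
 * only once and reused for both luma rows that share it. */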
static INLINE pstatus_t neon_YUV420ToX(const BYTE* WINPR_RESTRICT pSrc[3], const UINT32 srcStep[3],
                                       BYTE* WINPR_RESTRICT pDst, UINT32 dstStep,
                                       const prim_size_t* WINPR_RESTRICT roi, const uint8_t rPos,
                                       const uint8_t gPos, const uint8_t bPos, const uint8_t aPos)
{
    const UINT32 nWidth = roi->width;
    const UINT32 nHeight = roi->height;

    WINPR_ASSERT((nHeight % 2) == 0);
    for (UINT32 y = 0; y < nHeight; y += 2)
    {
        const uint8_t* pY[2] = { pSrc[0] + y * srcStep[0], pSrc[0] + (1ULL + y) * srcStep[0] };
        const uint8_t* pU = pSrc[1] + (y / 2) * srcStep[1];
        const uint8_t* pV = pSrc[2] + (y / 2) * srcStep[2];
        uint8_t* pRGB[2] = { pDst + y * dstStep, pDst + (1ULL + y) * dstStep };

        const pstatus_t rc =
            neon_YUV420ToX_DOUBLE_ROW(pY, pU, pV, pRGB, nWidth, rPos, gPos, bPos, aPos);
        if (rc != PRIMITIVES_SUCCESS)
            return rc;
    }

    return PRIMITIVES_SUCCESS;
}
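/* Map the destination pixel format to the byte offsets of the R, G, B and alpha channels
 * and fall back to the generic C implementation for formats not handled here. */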
static pstatus_t neon_YUV420ToRGB_8u_P3AC4R(const BYTE* WINPR_RESTRICT pSrc[3],
                                            const UINT32 srcStep[3], BYTE* WINPR_RESTRICT pDst,
                                            UINT32 dstStep, UINT32 DstFormat,
                                            const prim_size_t* WINPR_RESTRICT roi)
{
    switch (DstFormat)
    {
        case PIXEL_FORMAT_BGRA32:
        case PIXEL_FORMAT_BGRX32:
            return neon_YUV420ToX(pSrc, srcStep, pDst, dstStep, roi, 2, 1, 0, 3);

        case PIXEL_FORMAT_RGBA32:
        case PIXEL_FORMAT_RGBX32:
            return neon_YUV420ToX(pSrc, srcStep, pDst, dstStep, roi, 0, 1, 2, 3);

        case PIXEL_FORMAT_ARGB32:
        case PIXEL_FORMAT_XRGB32:
            return neon_YUV420ToX(pSrc, srcStep, pDst, dstStep, roi, 1, 2, 3, 0);

        case PIXEL_FORMAT_ABGR32:
        case PIXEL_FORMAT_XBGR32:
            return neon_YUV420ToX(pSrc, srcStep, pDst, dstStep, roi, 3, 2, 1, 0);

        default:
            return generic->YUV420ToRGB_8u_P3AC4R(pSrc, srcStep, pDst, dstStep, DstFormat, roi);
    }
}
static inline int16x8_t loadUVreg(uint8x8_t Vraw)
{
    const int16x8_t V = vreinterpretq_s16_u16(vmovl_u8(Vraw));
    const int16x8_t c128 = vdupq_n_s16(128);
    const int16x8_t E = vsubq_s16(V, c128);
    return E;
}
static inline int16x8x2_t loadUV444(uint8x16_t Vld)
{
    const uint8x8x2_t V = { { vget_low_u8(Vld), vget_high_u8(Vld) } };
    const int16x8x2_t res = { { loadUVreg(V.val[0]), loadUVreg(V.val[1]) } };
    return res;
}
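/* avgUV and the vectorized neon_avgUV below operate on a 2x2 block of chroma samples.
 * Treating u00 as the average of the block, 4 * u00 - u01 - u10 - u11 recovers the
 * original top-left sample; the result is only used subject to a CONDITIONAL_CLIP-style
 * threshold check (30 in the NEON variant), otherwise u00 is kept. */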
static inline void avgUV(BYTE U[2][2])
{
    const BYTE u00 = U[0][0];
    const INT16 umul = (INT16)u00 << 2;
    const INT16 sum = (INT16)U[0][1] + U[1][0] + U[1][1];
    const INT16 wavg = umul - sum;
    const BYTE val = CONDITIONAL_CLIP(wavg, u00);
    U[0][0] = val;
}
static inline void neon_avgUV(uint8x16_t pU[2])
{
    /* Deinterleave the two rows into even and odd columns */
    const uint8x16x2_t usplit = vuzpq_u8(pU[0], pU[1]);
    const uint8x16_t ueven = usplit.val[0];
    const uint8x16_t uodd = usplit.val[1];

    /* The four samples of each 2x2 block */
    const uint8x8_t u00 = vget_low_u8(ueven);
    const uint8x8_t u01 = vget_low_u8(uodd);
    const uint8x8_t u10 = vget_high_u8(ueven);
    const uint8x8_t u11 = vget_high_u8(uodd);

    const uint16x8_t uoddsum = vaddl_u8(u01, u10);
    const uint16x8_t usum = vaddq_u16(uoddsum, vmovl_u8(u11));

    const uint16x8_t umul = vshll_n_u8(u00, 2);

    /* wavg = 4 * u00 - (u01 + u10 + u11), saturated to uint8 */
    const int16x8_t wavg = vsubq_s16(vreinterpretq_s16_u16(umul), vreinterpretq_s16_u16(usum));
    const uint8x8_t avg = vqmovun_s16(wavg);

    /* Use the reconstructed value only where it differs from u00 by 30 or more */
    const uint8x8_t absdiff = vabd_u8(avg, u00);
    const uint8x8_t mask = vclt_u8(absdiff, vdup_n_u8(30));
    const uint8x8_t out1 = vand_u8(u00, mask);
    const uint8x8_t notmask = vmvn_u8(mask);
    const uint8x8_t out2 = vand_u8(avg, notmask);
    const uint8x8_t out = vorr_u8(out1, out2);

    /* Re-interleave the updated even columns with the odd columns of row 0 */
    const uint8x8x2_t ua = vzip_u8(out, u01);
    const uint8x16_t u = vcombine_u8(ua.val[0], ua.val[1]);
    pU[0] = u;
}
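/* Convert one row of 4:4:4 data: chroma is already at full resolution, so U and V are
 * loaded directly per pixel and no 2x2 filtering is possible here. */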
static INLINE pstatus_t neon_YUV444ToX_SINGLE_ROW(const BYTE* WINPR_RESTRICT pY,
                                                  const BYTE* WINPR_RESTRICT pU,
                                                  const BYTE* WINPR_RESTRICT pV,
                                                  BYTE* WINPR_RESTRICT pRGB, size_t width,
                                                  const uint8_t rPos, const uint8_t gPos,
                                                  const uint8_t bPos, const uint8_t aPos)
{
    WINPR_ASSERT(width % 2 == 0);

    size_t x = 0;

    for (; x < width - width % 16; x += 16)
    {
        const uint8x16_t U = vld1q_u8(&pU[x]);
        const uint8x16_t V = vld1q_u8(&pV[x]);
        const uint8x16_t Y0raw = vld1q_u8(&pY[x]);
        const uint8x8x2_t Y0 = { { vget_low_u8(Y0raw), vget_high_u8(Y0raw) } };
        const int16x8x2_t D0 = loadUV444(U);
        const int16x8x2_t E0 = loadUV444(V);
        neon_YuvToRgbPixel(&pRGB[4ULL * x], Y0, D0, E0, rPos, gPos, bPos, aPos);
    }

    for (; x < width; x += 2)
    {
        BYTE* rgb = &pRGB[x * 4];

        for (size_t j = 0; j < 2; j++)
        {
            const BYTE y = pY[x + j];
            const BYTE u = pU[x + j];
            const BYTE v = pV[x + j];

            neon_write_pixel(&rgb[4 * j], y, u, v, rPos, gPos, bPos, aPos);
        }
    }

    return PRIMITIVES_SUCCESS;
}
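/* Convert two rows of 4:4:4 data at a time. Pairing rows makes the 2x2 blocks needed by
 * the chroma filter above available; note that the avgUV/neon_avgUV calls in the loops
 * below are an assumed placement of that filter, inferred from the otherwise unused
 * helpers and the 2x2 scratch arrays. */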
static INLINE pstatus_t neon_YUV444ToX_DOUBLE_ROW(const BYTE* WINPR_RESTRICT pY[2],
                                                  const BYTE* WINPR_RESTRICT pU[2],
                                                  const BYTE* WINPR_RESTRICT pV[2],
                                                  BYTE* WINPR_RESTRICT pRGB[2], size_t width,
                                                  const uint8_t rPos, const uint8_t gPos,
                                                  const uint8_t bPos, const uint8_t aPos)
{
    WINPR_ASSERT(width % 2 == 0);

    size_t x = 0;

    for (; x < width - width % 16; x += 16)
    {
        uint8x16_t U[2] = { vld1q_u8(&pU[0][x]), vld1q_u8(&pU[1][x]) };
        neon_avgUV(U); /* assumed placement of the 2x2 chroma filter */

        uint8x16_t V[2] = { vld1q_u8(&pV[0][x]), vld1q_u8(&pV[1][x]) };
        neon_avgUV(V);

        const uint8x16_t Y0raw = vld1q_u8(&pY[0][x]);
        const uint8x8x2_t Y0 = { { vget_low_u8(Y0raw), vget_high_u8(Y0raw) } };
        const int16x8x2_t D0 = loadUV444(U[0]);
        const int16x8x2_t E0 = loadUV444(V[0]);
        neon_YuvToRgbPixel(&pRGB[0][4ULL * x], Y0, D0, E0, rPos, gPos, bPos, aPos);

        const uint8x16_t Y1raw = vld1q_u8(&pY[1][x]);
        const uint8x8x2_t Y1 = { { vget_low_u8(Y1raw), vget_high_u8(Y1raw) } };
        const int16x8x2_t D1 = loadUV444(U[1]);
        const int16x8x2_t E1 = loadUV444(V[1]);
        neon_YuvToRgbPixel(&pRGB[1][4ULL * x], Y1, D1, E1, rPos, gPos, bPos, aPos);
    }

    for (; x < width; x += 2)
    {
        BYTE* rgb[2] = { &pRGB[0][x * 4], &pRGB[1][x * 4] };

        BYTE U[2][2] = { { pU[0][x], pU[0][x + 1] }, { pU[1][x], pU[1][x + 1] } };
        avgUV(U); /* assumed placement of the scalar chroma filter */

        BYTE V[2][2] = { { pV[0][x], pV[0][x + 1] }, { pV[1][x], pV[1][x + 1] } };
        avgUV(V);

        for (size_t i = 0; i < 2; i++)
        {
            for (size_t j = 0; j < 2; j++)
            {
                const BYTE y = pY[i][x + j];
                const BYTE u = U[i][j];
                const BYTE v = V[i][j];

                neon_write_pixel(&rgb[i][4 * j], y, u, v, rPos, gPos, bPos, aPos);
            }
        }
    }

    return PRIMITIVES_SUCCESS;
}
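/* Convert a full 4:4:4 frame: even/odd row pairs go through the double-row path and a
 * trailing odd row, if any, through the single-row path. */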
static INLINE pstatus_t neon_YUV444ToX(const BYTE* WINPR_RESTRICT pSrc[3], const UINT32 srcStep[3],
                                       BYTE* WINPR_RESTRICT pDst, UINT32 dstStep,
                                       const prim_size_t* WINPR_RESTRICT roi, const uint8_t rPos,
                                       const uint8_t gPos, const uint8_t bPos, const uint8_t aPos)
{
    const UINT32 nWidth = roi->width;
    const UINT32 nHeight = roi->height;

    UINT32 y = 0;
    for (; y < nHeight - nHeight % 2; y += 2)
    {
        const uint8_t* WINPR_RESTRICT pY[2] = { pSrc[0] + y * srcStep[0],
                                                pSrc[0] + (y + 1) * srcStep[0] };
        const uint8_t* WINPR_RESTRICT pU[2] = { pSrc[1] + y * srcStep[1],
                                                pSrc[1] + (y + 1) * srcStep[1] };
        const uint8_t* WINPR_RESTRICT pV[2] = { pSrc[2] + y * srcStep[2],
                                                pSrc[2] + (y + 1) * srcStep[2] };
        uint8_t* WINPR_RESTRICT pRGB[2] = { &pDst[y * dstStep], &pDst[(y + 1) * dstStep] };

        const pstatus_t rc =
            neon_YUV444ToX_DOUBLE_ROW(pY, pU, pV, pRGB, nWidth, rPos, gPos, bPos, aPos);
        if (rc != PRIMITIVES_SUCCESS)
            return rc;
    }

    for (; y < nHeight; y++)
    {
        const uint8_t* WINPR_RESTRICT pY = pSrc[0] + y * srcStep[0];
        const uint8_t* WINPR_RESTRICT pU = pSrc[1] + y * srcStep[1];
        const uint8_t* WINPR_RESTRICT pV = pSrc[2] + y * srcStep[2];
        uint8_t* WINPR_RESTRICT pRGB = &pDst[y * dstStep];

        const pstatus_t rc =
            neon_YUV444ToX_SINGLE_ROW(pY, pU, pV, pRGB, nWidth, rPos, gPos, bPos, aPos);
        if (rc != PRIMITIVES_SUCCESS)
            return rc;
    }

    return PRIMITIVES_SUCCESS;
}
static pstatus_t neon_YUV444ToRGB_8u_P3AC4R(const BYTE* WINPR_RESTRICT pSrc[3],
                                            const UINT32 srcStep[3], BYTE* WINPR_RESTRICT pDst,
                                            UINT32 dstStep, UINT32 DstFormat,
                                            const prim_size_t* WINPR_RESTRICT roi)
{
    switch (DstFormat)
    {
        case PIXEL_FORMAT_BGRA32:
        case PIXEL_FORMAT_BGRX32:
            return neon_YUV444ToX(pSrc, srcStep, pDst, dstStep, roi, 2, 1, 0, 3);

        case PIXEL_FORMAT_RGBA32:
        case PIXEL_FORMAT_RGBX32:
            return neon_YUV444ToX(pSrc, srcStep, pDst, dstStep, roi, 0, 1, 2, 3);

        case PIXEL_FORMAT_ARGB32:
        case PIXEL_FORMAT_XRGB32:
            return neon_YUV444ToX(pSrc, srcStep, pDst, dstStep, roi, 1, 2, 3, 0);

        case PIXEL_FORMAT_ABGR32:
        case PIXEL_FORMAT_XBGR32:
            return neon_YUV444ToX(pSrc, srcStep, pDst, dstStep, roi, 3, 2, 1, 0);

        default:
            return generic->YUV444ToRGB_8u_P3AC4R(pSrc, srcStep, pDst, dstStep, DstFormat, roi);
    }
}
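/* AVC444 luma stage: copy the Y plane of the main view and spread its subsampled U and V
 * planes over the 4:4:4 destination, so that each chroma sample initially fills a 2x2
 * block; the chroma stages below overwrite the odd positions. */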
static pstatus_t neon_LumaToYUV444(const BYTE* WINPR_RESTRICT pSrcRaw[3], const UINT32 srcStep[3],
                                   BYTE* WINPR_RESTRICT pDstRaw[3], const UINT32 dstStep[3],
                                   const RECTANGLE_16* WINPR_RESTRICT roi)
{
    const UINT32 nWidth = roi->right - roi->left;
    const UINT32 nHeight = roi->bottom - roi->top;
    const UINT32 halfWidth = (nWidth + 1) / 2;
    const UINT32 halfHeight = (nHeight + 1) / 2;
    const UINT32 evenY = 0;
    const BYTE* pSrc[3] = { pSrcRaw[0] + roi->top * srcStep[0] + roi->left,
                            pSrcRaw[1] + roi->top / 2 * srcStep[1] + roi->left / 2,
                            pSrcRaw[2] + roi->top / 2 * srcStep[2] + roi->left / 2 };
    BYTE* pDst[3] = { pDstRaw[0] + roi->top * dstStep[0] + roi->left,
                      pDstRaw[1] + roi->top * dstStep[1] + roi->left,
                      pDstRaw[2] + roi->top * dstStep[2] + roi->left };

    /* Y data is already at full resolution, copy it row by row. */
    for (UINT32 y = 0; y < nHeight; y++)
    {
        const BYTE* Ym = pSrc[0] + srcStep[0] * y;
        BYTE* pY = pDst[0] + dstStep[0] * y;
        memcpy(pY, Ym, nWidth);
    }

    /* Spread the subsampled U and V planes: each chroma sample fills a 2x2 block. */
    for (UINT32 y = 0; y < halfHeight; y++)
    {
        const UINT32 val2y = (2 * y + evenY);
        const BYTE* Um = pSrc[1] + srcStep[1] * y;
        const BYTE* Vm = pSrc[2] + srcStep[2] * y;
        BYTE* pU = pDst[1] + dstStep[1] * val2y;
        BYTE* pV = pDst[2] + dstStep[2] * val2y;
        BYTE* pU1 = pU + dstStep[1];
        BYTE* pV1 = pV + dstStep[2];

        UINT32 x = 0;
        for (; x + 16 < halfWidth; x += 16)
        {
            {
                /* Duplicate each U value horizontally and write it to both rows. */
                const uint8x16_t u = vld1q_u8(Um);
                const uint8x16x2_t u2 = { { u, u } };
                vst2q_u8(pU, u2);
                vst2q_u8(pU1, u2);
                Um += 16;
                pU += 32;
                pU1 += 32;
            }
            {
                const uint8x16_t v = vld1q_u8(Vm);
                const uint8x16x2_t v2 = { { v, v } };
                vst2q_u8(pV, v2);
                vst2q_u8(pV1, v2);
                Vm += 16;
                pV += 32;
                pV1 += 32;
            }
        }

        for (; x < halfWidth; x++)
        {
            const BYTE u = *Um++;
            const BYTE v = *Vm++;

            *pU++ = u;
            *pU++ = u;
            *pU1++ = u;
            *pU1++ = u;
            *pV++ = v;
            *pV++ = v;
            *pV1++ = v;
            *pV1++ = v;
        }
    }

    return PRIMITIVES_SUCCESS;
}
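/* AVC444v1 chroma stage: the auxiliary frame carries the missing chroma samples in its
 * Y plane (copied to the odd destination rows) and in its U and V planes (copied to the
 * odd columns of the even destination rows). */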
static pstatus_t neon_ChromaV1ToYUV444(const BYTE* WINPR_RESTRICT pSrcRaw[3],
                                       const UINT32 srcStep[3], BYTE* WINPR_RESTRICT pDstRaw[3],
                                       const UINT32 dstStep[3],
                                       const RECTANGLE_16* WINPR_RESTRICT roi)
{
    const UINT32 mod = 16;
    UINT32 uY = 0;
    UINT32 vY = 0;
    const UINT32 nWidth = roi->right - roi->left;
    const UINT32 nHeight = roi->bottom - roi->top;
    const UINT32 halfWidth = nWidth / 2;
    const UINT32 halfHeight = nHeight / 2;
    const UINT32 oddY = 1;
    const UINT32 evenY = 0;
    const UINT32 oddX = 1;
    /* The auxiliary frame is aligned to multiples of 16x16, so use the padded height. */
    const UINT32 padHeight = nHeight + 16 - nHeight % 16;
    const UINT32 halfPad = halfWidth % 16;
    const BYTE* pSrc[3] = { pSrcRaw[0] + roi->top * srcStep[0] + roi->left,
                            pSrcRaw[1] + roi->top / 2 * srcStep[1] + roi->left / 2,
                            pSrcRaw[2] + roi->top / 2 * srcStep[2] + roi->left / 2 };
    BYTE* pDst[3] = { pDstRaw[0] + roi->top * dstStep[0] + roi->left,
                      pDstRaw[1] + roi->top * dstStep[1] + roi->left,
                      pDstRaw[2] + roi->top * dstStep[2] + roi->left };

    /* The auxiliary Y plane carries the chroma of the odd destination rows:
     * blocks of mod/2 lines alternate between U and V data. */
    for (UINT32 y = 0; y < padHeight; y++)
    {
        const BYTE* Ya = pSrc[0] + srcStep[0] * y;
        BYTE* pX = NULL;

        if ((y) % mod < (mod + 1) / 2)
        {
            const UINT32 pos = (2 * uY++ + oddY);

            if (pos >= nHeight)
                continue;

            pX = pDst[1] + dstStep[1] * pos;
        }
        else
        {
            const UINT32 pos = (2 * vY++ + oddY);

            if (pos >= nHeight)
                continue;

            pX = pDst[2] + dstStep[2] * pos;
        }

        memcpy(pX, Ya, nWidth);
    }

    /* The auxiliary U and V planes fill the odd columns of the even destination rows. */
    for (UINT32 y = 0; y < halfHeight; y++)
    {
        const UINT32 val2y = (y * 2 + evenY);
        const BYTE* Ua = pSrc[1] + srcStep[1] * y;
        const BYTE* Va = pSrc[2] + srcStep[2] * y;
        BYTE* pU = pDst[1] + dstStep[1] * val2y;
        BYTE* pV = pDst[2] + dstStep[2] * val2y;

        UINT32 x = 0;
        for (; x < halfWidth - halfPad; x += 16)
        {
            {
                uint8x16x2_t u = vld2q_u8(&pU[2 * x]);
                u.val[1] = vld1q_u8(&Ua[x]);
                vst2q_u8(&pU[2 * x], u);
            }
            {
                uint8x16x2_t v = vld2q_u8(&pV[2 * x]);
                v.val[1] = vld1q_u8(&Va[x]);
                vst2q_u8(&pV[2 * x], v);
            }
        }

        for (; x < halfWidth; x++)
        {
            const UINT32 val2x1 = (x * 2 + oddX);
            pU[val2x1] = Ua[x];
            pV[val2x1] = Va[x];
        }
    }

    return PRIMITIVES_SUCCESS;
}
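/* AVC444v2 chroma stage: the auxiliary Y plane (split horizontally into a U half and a
 * V half) carries the odd-column chroma of every destination row, while the auxiliary
 * U and V planes carry the remaining quarter-resolution samples written to the even
 * columns of the odd destination rows. */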
static pstatus_t neon_ChromaV2ToYUV444(const BYTE* WINPR_RESTRICT pSrc[3], const UINT32 srcStep[3],
                                       UINT32 nTotalWidth, UINT32 nTotalHeight,
                                       BYTE* WINPR_RESTRICT pDst[3], const UINT32 dstStep[3],
                                       const RECTANGLE_16* WINPR_RESTRICT roi)
{
    const UINT32 nWidth = roi->right - roi->left;
    const UINT32 nHeight = roi->bottom - roi->top;
    const UINT32 halfWidth = (nWidth + 1) / 2;
    const UINT32 halfPad = halfWidth % 16;
    const UINT32 halfHeight = (nHeight + 1) / 2;
    const UINT32 quarterWidth = (nWidth + 3) / 4;
    const UINT32 quarterPad = quarterWidth % 16;

    WINPR_UNUSED(nTotalHeight);

    /* Odd-column chroma samples for every destination row. */
    for (UINT32 y = 0; y < nHeight; y++)
    {
        const UINT32 yTop = y + roi->top;
        const BYTE* pYaU = pSrc[0] + srcStep[0] * yTop + roi->left / 2;
        const BYTE* pYaV = pYaU + nTotalWidth / 2;
        BYTE* pU = pDst[1] + dstStep[1] * yTop + roi->left;
        BYTE* pV = pDst[2] + dstStep[2] * yTop + roi->left;

        UINT32 x = 0;
        for (; x < halfWidth - halfPad; x += 16)
        {
            {
                uint8x16x2_t u = vld2q_u8(&pU[2 * x]);
                u.val[1] = vld1q_u8(&pYaU[x]);
                vst2q_u8(&pU[2 * x], u);
            }
            {
                uint8x16x2_t v = vld2q_u8(&pV[2 * x]);
                v.val[1] = vld1q_u8(&pYaV[x]);
                vst2q_u8(&pV[2 * x], v);
            }
        }

        for (; x < halfWidth; x++)
        {
            const UINT32 odd = 2 * x + 1;
            pU[odd] = pYaU[x];
            pV[odd] = pYaV[x];
        }
    }

    /* Quarter-resolution samples for the even columns of the odd destination rows. */
    for (UINT32 y = 0; y < halfHeight; y++)
    {
        const BYTE* pUaU = pSrc[1] + srcStep[1] * (y + roi->top / 2) + roi->left / 4;
        const BYTE* pUaV = pUaU + nTotalWidth / 4;
        const BYTE* pVaU = pSrc[2] + srcStep[2] * (y + roi->top / 2) + roi->left / 4;
        const BYTE* pVaV = pVaU + nTotalWidth / 4;
        BYTE* pU = pDst[1] + dstStep[1] * (2 * y + 1 + roi->top) + roi->left;
        BYTE* pV = pDst[2] + dstStep[2] * (2 * y + 1 + roi->top) + roi->left;

        UINT32 x = 0;
        for (; x < quarterWidth - quarterPad; x += 16)
        {
            {
                uint8x16x4_t u = vld4q_u8(&pU[4 * x]);
                u.val[0] = vld1q_u8(&pUaU[x]);
                u.val[2] = vld1q_u8(&pVaU[x]);
                vst4q_u8(&pU[4 * x], u);
            }
            {
                uint8x16x4_t v = vld4q_u8(&pV[4 * x]);
                v.val[0] = vld1q_u8(&pUaV[x]);
                v.val[2] = vld1q_u8(&pVaV[x]);
                vst4q_u8(&pV[4 * x], v);
            }
        }

        for (; x < quarterWidth; x++)
        {
            pU[4 * x + 0] = pUaU[x];
            pV[4 * x + 0] = pUaV[x];
            pU[4 * x + 2] = pVaU[x];
            pV[4 * x + 2] = pVaV[x];
        }
    }

    return PRIMITIVES_SUCCESS;
}
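/* Dispatch one AVC444 sub-frame to the matching reconstruction stage above. */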
static pstatus_t neon_YUV420CombineToYUV444(avc444_frame_type type,
                                            const BYTE* WINPR_RESTRICT pSrc[3],
                                            const UINT32 srcStep[3], UINT32 nWidth, UINT32 nHeight,
                                            BYTE* WINPR_RESTRICT pDst[3], const UINT32 dstStep[3],
                                            const RECTANGLE_16* WINPR_RESTRICT roi)
{
    if (!pSrc || !pSrc[0] || !pSrc[1] || !pSrc[2])
        return -1;

    if (!pDst || !pDst[0] || !pDst[1] || !pDst[2])
        return -1;

    if (!roi)
        return -1;

    switch (type)
    {
        case AVC444_LUMA:
            return neon_LumaToYUV444(pSrc, srcStep, pDst, dstStep, roi);

        case AVC444_CHROMAv1:
            return neon_ChromaV1ToYUV444(pSrc, srcStep, pDst, dstStep, roi);

        case AVC444_CHROMAv2:
            return neon_ChromaV2ToYUV444(pSrc, srcStep, nWidth, nHeight, pDst, dstStep, roi);

        default:
            return -1;
    }
}
#endif /* NEON_INTRINSICS_ENABLED */

void primitives_init_YUV_neon_int(primitives_t* WINPR_RESTRICT prims)
{
#if defined(NEON_INTRINSICS_ENABLED)
    generic = primitives_get_generic();
    WLog_VRB(PRIM_TAG, "NEON optimizations");
    prims->YUV420ToRGB_8u_P3AC4R = neon_YUV420ToRGB_8u_P3AC4R;
    prims->YUV444ToRGB_8u_P3AC4R = neon_YUV444ToRGB_8u_P3AC4R;
    prims->YUV420CombineToYUV444 = neon_YUV420CombineToYUV444;
#else
    WLog_VRB(PRIM_TAG, "undefined WITH_SIMD or neon intrinsics not available");
    WINPR_UNUSED(prims);
#endif
}