/* FreeRDP: prim_YUV_neon.c — NEON-optimized YUV <-> RGB conversion primitives. */
23#include <freerdp/config.h>
24
25#include <winpr/sysinfo.h>
26#include <winpr/crt.h>
27#include <freerdp/types.h>
28#include <freerdp/primitives.h>
29
30#include "prim_internal.h"
31#include "prim_YUV.h"
32
33#if defined(NEON_INTRINSICS_ENABLED)
34#include <arm_neon.h>
35
/* Generic (non-SIMD) primitives table; used as fallback for pixel formats
 * the NEON paths do not handle. Set in primitives_init_YUV_neon_int(). */
static primitives_t* generic = NULL;
37
/* Compute the R channel for 8 pixels:
 *   R = (256 * Y + 403 * (V - 128)) >> 8
 * C carries Y << 8 as unsigned 16 bit, E carries V - 128 as signed 16 bit.
 * D (U - 128) is not needed for R and is intentionally ignored.
 * The sum is widened to 32 bit (vmull/vmovl) so C + 403 * E cannot overflow,
 * then narrowed back with saturation (vqmovn/vqmovun) to the [0, 255] range. */
static INLINE uint8x8_t neon_YUV2R_single(uint16x8_t C, int16x8_t D, int16x8_t E)
{
	/* R = (256 * Y + 403 * (V - 128)) >> 8 */
	const int32x4_t Ch = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(C)));
	const int32x4_t e403h = vmull_n_s16(vget_high_s16(E), 403);
	const int32x4_t cehm = vaddq_s32(Ch, e403h);
	const int32x4_t ceh = vshrq_n_s32(cehm, 8);

	const int32x4_t Cl = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(C)));
	const int32x4_t e403l = vmull_n_s16(vget_low_s16(E), 403);
	const int32x4_t celm = vaddq_s32(Cl, e403l);
	const int32x4_t cel = vshrq_n_s32(celm, 8);
	/* Merge low/high halves and saturate to unsigned bytes. */
	const int16x8_t ce = vcombine_s16(vqmovn_s32(cel), vqmovn_s32(ceh));
	return vqmovun_s16(ce);
}
53
/* Compute the R channel for 16 pixels by applying neon_YUV2R_single to both
 * 8-pixel register halves. */
static INLINE uint8x8x2_t neon_YUV2R(uint16x8x2_t C, int16x8x2_t D, int16x8x2_t E)
{
	uint8x8x2_t out;
	out.val[0] = neon_YUV2R_single(C.val[0], D.val[0], E.val[0]);
	out.val[1] = neon_YUV2R_single(C.val[1], D.val[1], E.val[1]);
	return out;
}
60
/* Compute the G channel for 8 pixels:
 *   G = (256 * Y - 48 * (U - 128) - 120 * (V - 128)) >> 8
 * C carries Y << 8 (unsigned 16 bit), D = U - 128 and E = V - 128 (signed).
 * The 16-bit products 48 * D and 120 * E fit without overflow (|D|,|E| <= 128);
 * the final subtraction is done in 32 bit, then narrowed with saturation. */
static INLINE uint8x8_t neon_YUV2G_single(uint16x8_t C, int16x8_t D, int16x8_t E)
{
	/* G = (256L * Y - 48 * (U - 128) - 120 * (V - 128)) >> 8 */
	const int16x8_t d48 = vmulq_n_s16(D, 48);
	const int16x8_t e120 = vmulq_n_s16(E, 120);
	const int32x4_t deh = vaddl_s16(vget_high_s16(d48), vget_high_s16(e120));
	const int32x4_t Ch = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(C)));
	const int32x4_t cdeh32m = vsubq_s32(Ch, deh);
	const int32x4_t cdeh32 = vshrq_n_s32(cdeh32m, 8);
	const int16x4_t cdeh = vqmovn_s32(cdeh32);

	const int32x4_t del = vaddl_s16(vget_low_s16(d48), vget_low_s16(e120));
	const int32x4_t Cl = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(C)));
	const int32x4_t cdel32m = vsubq_s32(Cl, del);
	const int32x4_t cdel32 = vshrq_n_s32(cdel32m, 8);
	const int16x4_t cdel = vqmovn_s32(cdel32);
	/* Merge low/high halves and saturate to unsigned bytes. */
	const int16x8_t cde = vcombine_s16(cdel, cdeh);
	return vqmovun_s16(cde);
}
80
/* Compute the G channel for 16 pixels by applying neon_YUV2G_single to both
 * 8-pixel register halves. */
static INLINE uint8x8x2_t neon_YUV2G(uint16x8x2_t C, int16x8x2_t D, int16x8x2_t E)
{
	uint8x8x2_t out;
	out.val[0] = neon_YUV2G_single(C.val[0], D.val[0], E.val[0]);
	out.val[1] = neon_YUV2G_single(C.val[1], D.val[1], E.val[1]);
	return out;
}
87
/* Compute the B channel for 8 pixels:
 *   B = (256 * Y + 475 * (U - 128)) >> 8
 * C carries Y << 8 (unsigned 16 bit), D carries U - 128 (signed 16 bit).
 * E (V - 128) is not needed for B and is intentionally ignored.
 * Widening to 32 bit avoids overflow of C + 475 * D; results are saturated
 * back to the [0, 255] byte range. */
static INLINE uint8x8_t neon_YUV2B_single(uint16x8_t C, int16x8_t D, int16x8_t E)
{
	/* B = (256L * Y + 475 * (U - 128)) >> 8*/
	const int32x4_t Ch = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(C)));
	const int32x4_t d475h = vmull_n_s16(vget_high_s16(D), 475);
	const int32x4_t cdhm = vaddq_s32(Ch, d475h);
	const int32x4_t cdh = vshrq_n_s32(cdhm, 8);

	const int32x4_t Cl = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(C)));
	const int32x4_t d475l = vmull_n_s16(vget_low_s16(D), 475);
	const int32x4_t cdlm = vaddq_s32(Cl, d475l);
	const int32x4_t cdl = vshrq_n_s32(cdlm, 8);
	/* Merge low/high halves and saturate to unsigned bytes. */
	const int16x8_t cd = vcombine_s16(vqmovn_s32(cdl), vqmovn_s32(cdh));
	return vqmovun_s16(cd);
}
103
/* Compute the B channel for 16 pixels by applying neon_YUV2B_single to both
 * 8-pixel register halves. */
static INLINE uint8x8x2_t neon_YUV2B(uint16x8x2_t C, int16x8x2_t D, int16x8x2_t E)
{
	uint8x8x2_t out;
	out.val[0] = neon_YUV2B_single(C.val[0], D.val[0], E.val[0]);
	out.val[1] = neon_YUV2B_single(C.val[1], D.val[1], E.val[1]);
	return out;
}
110
/* Interleave the computed r/g/b vectors into 8 consecutive 4-byte pixels at
 * pRGB. The destination is read first (vld4) so the byte at aPos — the
 * alpha/X channel — is written back unchanged; aPos itself is therefore never
 * used as an index. Mark it unused to keep -Wunused-parameter quiet. */
static inline void neon_store_bgrx(BYTE* WINPR_RESTRICT pRGB, uint8x8_t r, uint8x8_t g, uint8x8_t b,
                                   uint8_t rPos, uint8_t gPos, uint8_t bPos, uint8_t aPos)
{
	WINPR_UNUSED(aPos); /* alpha byte is preserved from the existing pixel */

	uint8x8x4_t bgrx = vld4_u8(pRGB);
	bgrx.val[rPos] = r;
	bgrx.val[gPos] = g;
	bgrx.val[bPos] = b;
	vst4_u8(pRGB, bgrx);
}
120
/* Convert 16 pixels worth of YUV samples to RGB(X) and store them at pRGB.
 * Y holds the raw luma bytes, D = U - 128 and E = V - 128 (already widened to
 * signed 16 bit). r/g/bPos select the destination byte layout; the aPos byte
 * of each destination pixel is preserved (see neon_store_bgrx). */
static INLINE void neon_YuvToRgbPixel(BYTE* pRGB, uint8x8x2_t Y, int16x8x2_t D, int16x8x2_t E,
                                      const uint8_t rPos, const uint8_t gPos, const uint8_t bPos,
                                      const uint8_t aPos)
{
	/* Y * 256 == Y << 8 */
	const uint16x8x2_t C = { { vshlq_n_u16(vmovl_u8(Y.val[0]), 8),
		                       vshlq_n_u16(vmovl_u8(Y.val[1]), 8) } };

	const uint8x8x2_t r = neon_YUV2R(C, D, E);
	const uint8x8x2_t g = neon_YUV2G(C, D, E);
	const uint8x8x2_t b = neon_YUV2B(C, D, E);

	/* Two interleaved stores of 8 pixels (32 bytes) each. */
	neon_store_bgrx(pRGB, r.val[0], g.val[0], b.val[0], rPos, gPos, bPos, aPos);
	neon_store_bgrx(pRGB + sizeof(uint8x8x4_t), r.val[1], g.val[1], b.val[1], rPos, gPos, bPos,
	                aPos);
}
137
/* Load 8 horizontally-subsampled chroma bytes covering pixels [x, x+16),
 * remove the 128 bias and duplicate every value (vzip) so that each pair of
 * neighbouring pixels shares the same chroma sample. */
static inline int16x8x2_t loadUV(const BYTE* WINPR_RESTRICT pV, size_t x)
{
	const uint8x8_t raw = vld1_u8(&pV[x / 2]);
	const int16x8_t widened = vreinterpretq_s16_u16(vmovl_u8(raw));
	const int16x8_t unbiased = vsubq_s16(widened, vdupq_n_s16(128));
	return vzipq_s16(unbiased, unbiased);
}
146
147static INLINE void neon_write_pixel(BYTE* pRGB, BYTE Y, BYTE U, BYTE V, const uint8_t rPos,
148 const uint8_t gPos, const uint8_t bPos, const uint8_t aPos)
149{
150 const BYTE r = YUV2R(Y, U, V);
151 const BYTE g = YUV2G(Y, U, V);
152 const BYTE b = YUV2B(Y, U, V);
153
154 pRGB[rPos] = r;
155 pRGB[gPos] = g;
156 pRGB[bPos] = b;
157}
158
/* Convert two consecutive luma rows that share a single subsampled U/V row
 * (YUV420 layout) into 32-bit RGB rows.
 *
 * pY/pRGB hold pointers for the even and odd row; pU/pV point at the shared
 * chroma row. width must be even (asserted). The main loop processes 16
 * pixels per iteration with NEON; the tail loop finishes the remaining
 * pixels two at a time (one chroma sample per pixel pair).
 * Always returns PRIMITIVES_SUCCESS. */
static INLINE pstatus_t neon_YUV420ToX_DOUBLE_ROW(const BYTE* WINPR_RESTRICT pY[2],
                                                  const BYTE* WINPR_RESTRICT pU,
                                                  const BYTE* WINPR_RESTRICT pV,
                                                  BYTE* WINPR_RESTRICT pRGB[2], size_t width,
                                                  const uint8_t rPos, const uint8_t gPos,
                                                  const uint8_t bPos, const uint8_t aPos)
{
	WINPR_ASSERT((width % 2) == 0);

	UINT32 x = 0;

	/* SIMD path: 16 pixels per iteration; chroma (D, E) is reused for both rows. */
	for (; x < width - width % 16; x += 16)
	{
		const uint8x16_t Y0raw = vld1q_u8(&pY[0][x]);
		const uint8x8x2_t Y0 = { { vget_low_u8(Y0raw), vget_high_u8(Y0raw) } };
		const int16x8x2_t D = loadUV(pU, x);
		const int16x8x2_t E = loadUV(pV, x);
		neon_YuvToRgbPixel(&pRGB[0][4ULL * x], Y0, D, E, rPos, gPos, bPos, aPos);

		const uint8x16_t Y1raw = vld1q_u8(&pY[1][x]);
		const uint8x8x2_t Y1 = { { vget_low_u8(Y1raw), vget_high_u8(Y1raw) } };
		neon_YuvToRgbPixel(&pRGB[1][4ULL * x], Y1, D, E, rPos, gPos, bPos, aPos);
	}

	/* Scalar tail: each U/V sample covers a 2x2 pixel block. */
	for (; x < width; x += 2)
	{
		const BYTE U = pU[x / 2];
		const BYTE V = pV[x / 2];

		neon_write_pixel(&pRGB[0][4 * x], pY[0][x], U, V, rPos, gPos, bPos, aPos);
		neon_write_pixel(&pRGB[0][4 * (1ULL + x)], pY[0][1ULL + x], U, V, rPos, gPos, bPos, aPos);
		neon_write_pixel(&pRGB[1][4 * x], pY[1][x], U, V, rPos, gPos, bPos, aPos);
		neon_write_pixel(&pRGB[1][4 * (1ULL + x)], pY[1][1ULL + x], U, V, rPos, gPos, bPos, aPos);
	}

	return PRIMITIVES_SUCCESS;
}
196
197static INLINE pstatus_t neon_YUV420ToX(const BYTE* WINPR_RESTRICT pSrc[3], const UINT32 srcStep[3],
198 BYTE* WINPR_RESTRICT pDst, UINT32 dstStep,
199 const prim_size_t* WINPR_RESTRICT roi, const uint8_t rPos,
200 const uint8_t gPos, const uint8_t bPos, const uint8_t aPos)
201{
202 const UINT32 nWidth = roi->width;
203 const UINT32 nHeight = roi->height;
204
205 WINPR_ASSERT((nHeight % 2) == 0);
206 for (UINT32 y = 0; y < nHeight; y += 2)
207 {
208 const uint8_t* pY[2] = { pSrc[0] + y * srcStep[0], pSrc[0] + (1ULL + y) * srcStep[0] };
209 const uint8_t* pU = pSrc[1] + (y / 2) * srcStep[1];
210 const uint8_t* pV = pSrc[2] + (y / 2) * srcStep[2];
211 uint8_t* pRGB[2] = { pDst + y * dstStep, pDst + (1ULL + y) * dstStep };
212
213 const pstatus_t rc =
214 neon_YUV420ToX_DOUBLE_ROW(pY, pU, pV, pRGB, nWidth, rPos, gPos, bPos, aPos);
215 if (rc != PRIMITIVES_SUCCESS)
216 return rc;
217 }
218
219 return PRIMITIVES_SUCCESS;
220}
221
222static pstatus_t neon_YUV420ToRGB_8u_P3AC4R(const BYTE* WINPR_RESTRICT pSrc[3],
223 const UINT32 srcStep[3], BYTE* WINPR_RESTRICT pDst,
224 UINT32 dstStep, UINT32 DstFormat,
225 const prim_size_t* WINPR_RESTRICT roi)
226{
227 switch (DstFormat)
228 {
229 case PIXEL_FORMAT_BGRA32:
230 case PIXEL_FORMAT_BGRX32:
231 return neon_YUV420ToX(pSrc, srcStep, pDst, dstStep, roi, 2, 1, 0, 3);
232
233 case PIXEL_FORMAT_RGBA32:
234 case PIXEL_FORMAT_RGBX32:
235 return neon_YUV420ToX(pSrc, srcStep, pDst, dstStep, roi, 0, 1, 2, 3);
236
237 case PIXEL_FORMAT_ARGB32:
238 case PIXEL_FORMAT_XRGB32:
239 return neon_YUV420ToX(pSrc, srcStep, pDst, dstStep, roi, 1, 2, 3, 0);
240
241 case PIXEL_FORMAT_ABGR32:
242 case PIXEL_FORMAT_XBGR32:
243 return neon_YUV420ToX(pSrc, srcStep, pDst, dstStep, roi, 3, 2, 1, 0);
244
245 default:
246 return generic->YUV420ToRGB_8u_P3AC4R(pSrc, srcStep, pDst, dstStep, DstFormat, roi);
247 }
248}
249
/* Widen 8 chroma bytes to signed 16 bit and remove the 128 bias. */
static inline int16x8_t loadUVreg(uint8x8_t Vraw)
{
	const int16x8_t widened = vreinterpretq_s16_u16(vmovl_u8(Vraw));
	return vsubq_s16(widened, vdupq_n_s16(128));
}
257
/* Split 16 full-resolution chroma bytes (YUV444, no subsampling) into two
 * bias-corrected signed 16-bit registers. */
static inline int16x8x2_t loadUV444(uint8x16_t Vld)
{
	int16x8x2_t out;
	out.val[0] = loadUVreg(vget_low_u8(Vld));
	out.val[1] = loadUVreg(vget_high_u8(Vld));
	return out;
}
267
268static inline void avgUV(BYTE U[2][2])
269{
270 const BYTE u00 = U[0][0];
271 const INT16 umul = (INT16)u00 << 2;
272 const INT16 sum = (INT16)U[0][1] + U[1][0] + U[1][1];
273 const INT16 wavg = umul - sum;
274 const BYTE val = CONDITIONAL_CLIP(wavg, u00);
275 U[0][0] = val;
276}
277
/* Vectorized version of avgUV(): for 8 horizontally adjacent 2x2 chroma
 * blocks, replace the top-left sample U00 with the weighted value
 * 4 * U00 - (U01 + U10 + U11), but keep the original U00 when the result
 * differs from it by less than 30 (same threshold logic as CONDITIONAL_CLIP).
 * pU[0] holds the even (top) row, pU[1] the odd (bottom) row; only pU[0] is
 * rewritten. */
static inline void neon_avgUV(uint8x16_t pU[2])
{
	/* put even and odd values into different registers.
	 * U 0/0 is in lower half */
	const uint8x16x2_t usplit = vuzpq_u8(pU[0], pU[1]);
	const uint8x16_t ueven = usplit.val[0];
	const uint8x16_t uodd = usplit.val[1];

	const uint8x8_t u00 = vget_low_u8(ueven);
	const uint8x8_t u01 = vget_low_u8(uodd);
	const uint8x8_t u10 = vget_high_u8(ueven);
	const uint8x8_t u11 = vget_high_u8(uodd);

	/* Create sum of U01 + U10 + U11 */
	const uint16x8_t uoddsum = vaddl_u8(u01, u10);
	const uint16x8_t usum = vaddq_u16(uoddsum, vmovl_u8(u11));

	/* U00 * 4 */
	const uint16x8_t umul = vshll_n_u8(u00, 2);

	/* 4 * U00 - (U01 + U10 + U11), saturated back to a byte */
	const int16x8_t wavg = vsubq_s16(vreinterpretq_s16_u16(umul), vreinterpretq_s16_u16(usum));
	const uint8x8_t avg = vqmovun_s16(wavg);

	/* abs(u00 - avg) */
	const uint8x8_t absdiff = vabd_u8(avg, u00);

	/* (diff < 30) ? u00 : avg */
	const uint8x8_t mask = vclt_u8(absdiff, vdup_n_u8(30));

	/* out1 = u00 & mask */
	const uint8x8_t out1 = vand_u8(u00, mask);

	/* invmask = ~mask */
	const uint8x8_t notmask = vmvn_u8(mask);

	/* out2 = avg & invmask */
	const uint8x8_t out2 = vand_u8(avg, notmask);

	/* out = out1 | out2 */
	const uint8x8_t out = vorr_u8(out1, out2);

	/* Re-interleave the filtered U00 values with the untouched U01 values
	 * and write back the top row only. */
	const uint8x8x2_t ua = vzip_u8(out, u01);
	const uint8x16_t u = vcombine_u8(ua.val[0], ua.val[1]);
	pU[0] = u;
}
324
/* Convert one YUV444 row (full-resolution chroma, no vertical pairing) to
 * 32-bit RGB. Used for the last row of odd-height frames where the 2x2
 * chroma filter of the DOUBLE_ROW variant cannot be applied.
 * width must be even (asserted). Always returns PRIMITIVES_SUCCESS. */
static INLINE pstatus_t neon_YUV444ToX_SINGLE_ROW(const BYTE* WINPR_RESTRICT pY,
                                                  const BYTE* WINPR_RESTRICT pU,
                                                  const BYTE* WINPR_RESTRICT pV,
                                                  BYTE* WINPR_RESTRICT pRGB, size_t width,
                                                  const uint8_t rPos, const uint8_t gPos,
                                                  const uint8_t bPos, const uint8_t aPos)
{
	WINPR_ASSERT(width % 2 == 0);

	size_t x = 0;

	/* SIMD path: 16 pixels per iteration. */
	for (; x < width - width % 16; x += 16)
	{
		uint8x16_t U = vld1q_u8(&pU[x]);
		uint8x16_t V = vld1q_u8(&pV[x]);
		const uint8x16_t Y0raw = vld1q_u8(&pY[x]);
		const uint8x8x2_t Y0 = { { vget_low_u8(Y0raw), vget_high_u8(Y0raw) } };
		const int16x8x2_t D0 = loadUV444(U);
		const int16x8x2_t E0 = loadUV444(V);
		neon_YuvToRgbPixel(&pRGB[4ULL * x], Y0, D0, E0, rPos, gPos, bPos, aPos);
	}

	/* Scalar tail, two pixels per iteration. */
	for (; x < width; x += 2)
	{
		BYTE* rgb = &pRGB[x * 4];

		for (size_t j = 0; j < 2; j++)
		{
			const BYTE y = pY[x + j];
			const BYTE u = pU[x + j];
			const BYTE v = pV[x + j];

			neon_write_pixel(&rgb[4 * (j)], y, u, v, rPos, gPos, bPos, aPos);
		}
	}

	return PRIMITIVES_SUCCESS;
}
363
/* Convert two consecutive YUV444 rows to 32-bit RGB, applying the 2x2 chroma
 * smoothing filter (avgUV / neon_avgUV) to each block's top-left U and V
 * sample before conversion. pY/pU/pV/pRGB hold even- and odd-row pointers.
 * width must be even (asserted). Always returns PRIMITIVES_SUCCESS. */
static INLINE pstatus_t neon_YUV444ToX_DOUBLE_ROW(const BYTE* WINPR_RESTRICT pY[2],
                                                  const BYTE* WINPR_RESTRICT pU[2],
                                                  const BYTE* WINPR_RESTRICT pV[2],
                                                  BYTE* WINPR_RESTRICT pRGB[2], size_t width,
                                                  const uint8_t rPos, const uint8_t gPos,
                                                  const uint8_t bPos, const uint8_t aPos)
{
	WINPR_ASSERT(width % 2 == 0);

	size_t x = 0;

	/* SIMD path: 16 pixels of both rows per iteration. */
	for (; x < width - width % 16; x += 16)
	{
		/* Filter chroma in place; only U[0]/V[0] (the even row) is modified. */
		uint8x16_t U[2] = { vld1q_u8(&pU[0][x]), vld1q_u8(&pU[1][x]) };
		neon_avgUV(U);

		uint8x16_t V[2] = { vld1q_u8(&pV[0][x]), vld1q_u8(&pV[1][x]) };
		neon_avgUV(V);

		const uint8x16_t Y0raw = vld1q_u8(&pY[0][x]);
		const uint8x8x2_t Y0 = { { vget_low_u8(Y0raw), vget_high_u8(Y0raw) } };
		const int16x8x2_t D0 = loadUV444(U[0]);
		const int16x8x2_t E0 = loadUV444(V[0]);
		neon_YuvToRgbPixel(&pRGB[0][4ULL * x], Y0, D0, E0, rPos, gPos, bPos, aPos);

		const uint8x16_t Y1raw = vld1q_u8(&pY[1][x]);
		const uint8x8x2_t Y1 = { { vget_low_u8(Y1raw), vget_high_u8(Y1raw) } };
		const int16x8x2_t D1 = loadUV444(U[1]);
		const int16x8x2_t E1 = loadUV444(V[1]);
		neon_YuvToRgbPixel(&pRGB[1][4ULL * x], Y1, D1, E1, rPos, gPos, bPos, aPos);
	}

	/* Scalar tail: one 2x2 pixel block per iteration. */
	for (; x < width; x += 2)
	{
		BYTE* rgb[2] = { &pRGB[0][x * 4], &pRGB[1][x * 4] };
		BYTE U[2][2] = { { pU[0][x], pU[0][x + 1] }, { pU[1][x], pU[1][x + 1] } };
		avgUV(U);

		BYTE V[2][2] = { { pV[0][x], pV[0][x + 1] }, { pV[1][x], pV[1][x + 1] } };
		avgUV(V);

		for (size_t i = 0; i < 2; i++)
		{
			for (size_t j = 0; j < 2; j++)
			{
				const BYTE y = pY[i][x + j];
				const BYTE u = U[i][j];
				const BYTE v = V[i][j];

				neon_write_pixel(&rgb[i][4 * (j)], y, u, v, rPos, gPos, bPos, aPos);
			}
		}
	}

	return PRIMITIVES_SUCCESS;
}
420
421static INLINE pstatus_t neon_YUV444ToX(const BYTE* WINPR_RESTRICT pSrc[3], const UINT32 srcStep[3],
422 BYTE* WINPR_RESTRICT pDst, UINT32 dstStep,
423 const prim_size_t* WINPR_RESTRICT roi, const uint8_t rPos,
424 const uint8_t gPos, const uint8_t bPos, const uint8_t aPos)
425{
426 WINPR_ASSERT(roi);
427 const UINT32 nWidth = roi->width;
428 const UINT32 nHeight = roi->height;
429
430 size_t y = 0;
431 for (; y < nHeight - nHeight % 2; y += 2)
432 {
433 const uint8_t* WINPR_RESTRICT pY[2] = { pSrc[0] + y * srcStep[0],
434 pSrc[0] + (y + 1) * srcStep[0] };
435 const uint8_t* WINPR_RESTRICT pU[2] = { pSrc[1] + y * srcStep[1],
436 pSrc[1] + (y + 1) * srcStep[1] };
437 const uint8_t* WINPR_RESTRICT pV[2] = { pSrc[2] + y * srcStep[2],
438 pSrc[2] + (y + 1) * srcStep[2] };
439
440 uint8_t* WINPR_RESTRICT pRGB[2] = { &pDst[y * dstStep], &pDst[(y + 1) * dstStep] };
441
442 const pstatus_t rc =
443 neon_YUV444ToX_DOUBLE_ROW(pY, pU, pV, pRGB, nWidth, rPos, gPos, bPos, aPos);
444 if (rc != PRIMITIVES_SUCCESS)
445 return rc;
446 }
447 for (; y < nHeight; y++)
448 {
449 const uint8_t* WINPR_RESTRICT pY = pSrc[0] + y * srcStep[0];
450 const uint8_t* WINPR_RESTRICT pU = pSrc[1] + y * srcStep[1];
451 const uint8_t* WINPR_RESTRICT pV = pSrc[2] + y * srcStep[2];
452 uint8_t* WINPR_RESTRICT pRGB = &pDst[y * dstStep];
453
454 const pstatus_t rc =
455 neon_YUV444ToX_SINGLE_ROW(pY, pU, pV, pRGB, nWidth, rPos, gPos, bPos, aPos);
456 if (rc != PRIMITIVES_SUCCESS)
457 return rc;
458 }
459
460 return PRIMITIVES_SUCCESS;
461}
462
463static pstatus_t neon_YUV444ToRGB_8u_P3AC4R(const BYTE* WINPR_RESTRICT pSrc[3],
464 const UINT32 srcStep[3], BYTE* WINPR_RESTRICT pDst,
465 UINT32 dstStep, UINT32 DstFormat,
466 const prim_size_t* WINPR_RESTRICT roi)
467{
468 switch (DstFormat)
469 {
470 case PIXEL_FORMAT_BGRA32:
471 case PIXEL_FORMAT_BGRX32:
472 return neon_YUV444ToX(pSrc, srcStep, pDst, dstStep, roi, 2, 1, 0, 3);
473
474 case PIXEL_FORMAT_RGBA32:
475 case PIXEL_FORMAT_RGBX32:
476 return neon_YUV444ToX(pSrc, srcStep, pDst, dstStep, roi, 0, 1, 2, 3);
477
478 case PIXEL_FORMAT_ARGB32:
479 case PIXEL_FORMAT_XRGB32:
480 return neon_YUV444ToX(pSrc, srcStep, pDst, dstStep, roi, 1, 2, 3, 0);
481
482 case PIXEL_FORMAT_ABGR32:
483 case PIXEL_FORMAT_XBGR32:
484 return neon_YUV444ToX(pSrc, srcStep, pDst, dstStep, roi, 3, 2, 1, 0);
485
486 default:
487 return generic->YUV444ToRGB_8u_P3AC4R(pSrc, srcStep, pDst, dstStep, DstFormat, roi);
488 }
489}
490
/* AVC444 stage B1-B3: copy the luma plane of the main view unchanged and
 * horizontally+vertically double the subsampled U/V planes into the
 * full-resolution destination planes.
 *
 * NOTE(review): for odd nHeight the last iteration writes chroma row
 * 2 * (halfHeight - 1) + 1 == nHeight, one row past the ROI — this assumes
 * the destination planes are padded (16x16-aligned auxiliary buffers);
 * TODO confirm against the callers' allocation. */
static pstatus_t neon_LumaToYUV444(const BYTE* WINPR_RESTRICT pSrcRaw[3], const UINT32 srcStep[3],
                                   BYTE* WINPR_RESTRICT pDstRaw[3], const UINT32 dstStep[3],
                                   const RECTANGLE_16* WINPR_RESTRICT roi)
{
	const UINT32 nWidth = roi->right - roi->left;
	const UINT32 nHeight = roi->bottom - roi->top;
	const UINT32 halfWidth = (nWidth + 1) / 2;
	const UINT32 halfHeight = (nHeight + 1) / 2;
	const UINT32 evenY = 0;
	/* Offset the plane pointers to the ROI origin (chroma is subsampled 2x). */
	const BYTE* pSrc[3] = { pSrcRaw[0] + roi->top * srcStep[0] + roi->left,
		                    pSrcRaw[1] + roi->top / 2 * srcStep[1] + roi->left / 2,
		                    pSrcRaw[2] + roi->top / 2 * srcStep[2] + roi->left / 2 };
	BYTE* pDst[3] = { pDstRaw[0] + roi->top * dstStep[0] + roi->left,
		              pDstRaw[1] + roi->top * dstStep[1] + roi->left,
		              pDstRaw[2] + roi->top * dstStep[2] + roi->left };

	/* Y data is already here... */
	/* B1 */
	for (UINT32 y = 0; y < nHeight; y++)
	{
		const BYTE* Ym = pSrc[0] + srcStep[0] * y;
		BYTE* pY = pDst[0] + dstStep[0] * y;
		memcpy(pY, Ym, nWidth);
	}

	/* The first half of U, V are already here part of this frame. */
	/* B2 and B3: each source chroma sample is replicated into a 2x2 block. */
	for (UINT32 y = 0; y < halfHeight; y++)
	{
		const UINT32 val2y = (2 * y + evenY);
		const BYTE* Um = pSrc[1] + srcStep[1] * y;
		const BYTE* Vm = pSrc[2] + srcStep[2] * y;
		BYTE* pU = pDst[1] + dstStep[1] * val2y;
		BYTE* pV = pDst[2] + dstStep[2] * val2y;
		BYTE* pU1 = pU + dstStep[1]; /* next (odd) destination row */
		BYTE* pV1 = pV + dstStep[2];

		UINT32 x = 0;
		/* SIMD path: 16 source samples -> 32 destination bytes in two rows.
		 * Condition uses '<', so the scalar tail always handles the rest. */
		for (; x + 16 < halfWidth; x += 16)
		{
			{
				const uint8x16_t u = vld1q_u8(Um);
				uint8x16x2_t u2x;
				u2x.val[0] = u;
				u2x.val[1] = u;
				vst2q_u8(pU, u2x); /* interleaved store duplicates each byte */
				vst2q_u8(pU1, u2x);
				Um += 16;
				pU += 32;
				pU1 += 32;
			}
			{
				const uint8x16_t v = vld1q_u8(Vm);
				uint8x16x2_t v2x;
				v2x.val[0] = v;
				v2x.val[1] = v;
				vst2q_u8(pV, v2x);
				vst2q_u8(pV1, v2x);
				Vm += 16;
				pV += 32;
				pV1 += 32;
			}
		}

		/* Scalar tail: duplicate each sample into both columns of both rows. */
		for (; x < halfWidth; x++)
		{
			const BYTE u = *Um++;
			const BYTE v = *Vm++;
			*pU++ = u;
			*pU++ = u;
			*pU1++ = u;
			*pU1++ = u;
			*pV++ = v;
			*pV++ = v;
			*pV1++ = v;
			*pV1++ = v;
		}
	}

	return PRIMITIVES_SUCCESS;
}
572
/* AVC444v1 chroma reconstruction: merge the auxiliary frame into the
 * full-resolution U/V planes produced by neon_LumaToYUV444.
 * B4/B5: odd destination rows of U and V are copied from the auxiliary luma
 *        plane, which interleaves U and V rows in groups of 8 within each
 *        16-row tile.
 * B6/B7: odd destination columns of even rows come from the auxiliary
 *        chroma planes.
 * Only change besides documentation: the misspelled local 'padHeigth' is
 * renamed to 'padHeight'. */
static pstatus_t neon_ChromaV1ToYUV444(const BYTE* WINPR_RESTRICT pSrcRaw[3],
                                       const UINT32 srcStep[3], BYTE* WINPR_RESTRICT pDstRaw[3],
                                       const UINT32 dstStep[3],
                                       const RECTANGLE_16* WINPR_RESTRICT roi)
{
	const UINT32 mod = 16;
	UINT32 uY = 0;
	UINT32 vY = 0;
	const UINT32 nWidth = roi->right - roi->left;
	const UINT32 nHeight = roi->bottom - roi->top;
	const UINT32 halfWidth = (nWidth) / 2;
	const UINT32 halfHeight = (nHeight) / 2;
	const UINT32 oddY = 1;
	const UINT32 evenY = 0;
	const UINT32 oddX = 1;
	/* The auxiliary frame is aligned to multiples of 16x16.
	 * We need the padded height for B4 and B5 conversion. */
	const UINT32 padHeight = nHeight + 16 - nHeight % 16;
	const UINT32 halfPad = halfWidth % 16;
	/* Offset the plane pointers to the ROI origin (chroma is subsampled 2x). */
	const BYTE* pSrc[3] = { pSrcRaw[0] + roi->top * srcStep[0] + roi->left,
		                    pSrcRaw[1] + roi->top / 2 * srcStep[1] + roi->left / 2,
		                    pSrcRaw[2] + roi->top / 2 * srcStep[2] + roi->left / 2 };
	BYTE* pDst[3] = { pDstRaw[0] + roi->top * dstStep[0] + roi->left,
		              pDstRaw[1] + roi->top * dstStep[1] + roi->left,
		              pDstRaw[2] + roi->top * dstStep[2] + roi->left };

	/* The second half of U and V is a bit more tricky... */
	/* B4 and B5 */
	for (UINT32 y = 0; y < padHeight; y++)
	{
		const BYTE* Ya = pSrc[0] + srcStep[0] * y;
		BYTE* pX;

		/* First half of each 16-row tile carries U rows, second half V rows. */
		if ((y) % mod < (mod + 1) / 2)
		{
			const UINT32 pos = (2 * uY++ + oddY);

			if (pos >= nHeight)
				continue;

			pX = pDst[1] + dstStep[1] * pos;
		}
		else
		{
			const UINT32 pos = (2 * vY++ + oddY);

			if (pos >= nHeight)
				continue;

			pX = pDst[2] + dstStep[2] * pos;
		}

		memcpy(pX, Ya, nWidth);
	}

	/* B6 and B7: write odd columns of even rows from the auxiliary chroma. */
	for (UINT32 y = 0; y < halfHeight; y++)
	{
		const UINT32 val2y = (y * 2 + evenY);
		const BYTE* Ua = pSrc[1] + srcStep[1] * y;
		const BYTE* Va = pSrc[2] + srcStep[2] * y;
		BYTE* pU = pDst[1] + dstStep[1] * val2y;
		BYTE* pV = pDst[2] + dstStep[2] * val2y;

		UINT32 x = 0;
		for (; x < halfWidth - halfPad; x += 16)
		{
			{
				/* Load 32 interleaved bytes, replace the odd lane, store back. */
				uint8x16x2_t u = vld2q_u8(&pU[2 * x]);
				u.val[1] = vld1q_u8(&Ua[x]);
				vst2q_u8(&pU[2 * x], u);
			}
			{
				uint8x16x2_t v = vld2q_u8(&pV[2 * x]);
				v.val[1] = vld1q_u8(&Va[x]);
				vst2q_u8(&pV[2 * x], v);
			}
		}

		for (; x < halfWidth; x++)
		{
			const UINT32 val2x1 = (x * 2 + oddX);
			pU[val2x1] = Ua[x];
			pV[val2x1] = Va[x];
		}
	}

	return PRIMITIVES_SUCCESS;
}
662
/* AVC444v2 chroma reconstruction: merge the auxiliary frame into the
 * full-resolution U/V planes.
 * B4/B5: odd destination columns of every row come from the auxiliary luma
 *        plane, whose left half carries U and right half carries V samples.
 * B6-B9: every 4th destination sample (columns 0 and 2 of odd rows) comes
 *        from the auxiliary chroma planes, split the same way.
 * nTotalWidth/nTotalHeight describe the full auxiliary frame so the V halves
 * can be located; roi selects the region to update. */
static pstatus_t neon_ChromaV2ToYUV444(const BYTE* WINPR_RESTRICT pSrc[3], const UINT32 srcStep[3],
                                       UINT32 nTotalWidth, UINT32 nTotalHeight,
                                       BYTE* WINPR_RESTRICT pDst[3], const UINT32 dstStep[3],
                                       const RECTANGLE_16* WINPR_RESTRICT roi)
{
	const UINT32 nWidth = roi->right - roi->left;
	const UINT32 nHeight = roi->bottom - roi->top;
	const UINT32 halfWidth = (nWidth + 1) / 2;
	const UINT32 halfPad = halfWidth % 16;
	const UINT32 halfHeight = (nHeight + 1) / 2;
	const UINT32 quaterWidth = (nWidth + 3) / 4;
	const UINT32 quaterPad = quaterWidth % 16;

	/* B4 and B5: odd UV values for width/2, height */
	for (UINT32 y = 0; y < nHeight; y++)
	{
		const UINT32 yTop = y + roi->top;
		const BYTE* pYaU = pSrc[0] + srcStep[0] * yTop + roi->left / 2;
		const BYTE* pYaV = pYaU + nTotalWidth / 2; /* V half starts mid-plane */
		BYTE* pU = pDst[1] + dstStep[1] * yTop + roi->left;
		BYTE* pV = pDst[2] + dstStep[2] * yTop + roi->left;

		UINT32 x = 0;
		for (; x < halfWidth - halfPad; x += 16)
		{
			{
				/* Load 32 interleaved bytes, replace the odd lane, store back. */
				uint8x16x2_t u = vld2q_u8(&pU[2 * x]);
				u.val[1] = vld1q_u8(&pYaU[x]);
				vst2q_u8(&pU[2 * x], u);
			}
			{
				uint8x16x2_t v = vld2q_u8(&pV[2 * x]);
				v.val[1] = vld1q_u8(&pYaV[x]);
				vst2q_u8(&pV[2 * x], v);
			}
		}

		for (; x < halfWidth; x++)
		{
			const UINT32 odd = 2 * x + 1;
			pU[odd] = pYaU[x];
			pV[odd] = pYaV[x];
		}
	}

	/* B6 - B9: columns 0 and 2 of each 4-sample group on odd rows. */
	for (UINT32 y = 0; y < halfHeight; y++)
	{
		const BYTE* pUaU = pSrc[1] + srcStep[1] * (y + roi->top / 2) + roi->left / 4;
		const BYTE* pUaV = pUaU + nTotalWidth / 4;
		const BYTE* pVaU = pSrc[2] + srcStep[2] * (y + roi->top / 2) + roi->left / 4;
		const BYTE* pVaV = pVaU + nTotalWidth / 4;
		BYTE* pU = pDst[1] + dstStep[1] * (2 * y + 1 + roi->top) + roi->left;
		BYTE* pV = pDst[2] + dstStep[2] * (2 * y + 1 + roi->top) + roi->left;

		UINT32 x = 0;
		for (; x < quaterWidth - quaterPad; x += 16)
		{
			{
				/* Load 64 bytes de-interleaved by 4, replace lanes 0 and 2. */
				uint8x16x4_t u = vld4q_u8(&pU[4 * x]);
				u.val[0] = vld1q_u8(&pUaU[x]);
				u.val[2] = vld1q_u8(&pVaU[x]);
				vst4q_u8(&pU[4 * x], u);
			}
			{
				uint8x16x4_t v = vld4q_u8(&pV[4 * x]);
				v.val[0] = vld1q_u8(&pUaV[x]);
				v.val[2] = vld1q_u8(&pVaV[x]);
				vst4q_u8(&pV[4 * x], v);
			}
		}

		for (; x < quaterWidth; x++)
		{
			pU[4 * x + 0] = pUaU[x];
			pV[4 * x + 0] = pUaV[x];
			pU[4 * x + 2] = pVaU[x];
			pV[4 * x + 2] = pVaV[x];
		}
	}

	return PRIMITIVES_SUCCESS;
}
746
747static pstatus_t neon_YUV420CombineToYUV444(avc444_frame_type type,
748 const BYTE* WINPR_RESTRICT pSrc[3],
749 const UINT32 srcStep[3], UINT32 nWidth, UINT32 nHeight,
750 BYTE* WINPR_RESTRICT pDst[3], const UINT32 dstStep[3],
751 const RECTANGLE_16* WINPR_RESTRICT roi)
752{
753 if (!pSrc || !pSrc[0] || !pSrc[1] || !pSrc[2])
754 return -1;
755
756 if (!pDst || !pDst[0] || !pDst[1] || !pDst[2])
757 return -1;
758
759 if (!roi)
760 return -1;
761
762 switch (type)
763 {
764 case AVC444_LUMA:
765 return neon_LumaToYUV444(pSrc, srcStep, pDst, dstStep, roi);
766
767 case AVC444_CHROMAv1:
768 return neon_ChromaV1ToYUV444(pSrc, srcStep, pDst, dstStep, roi);
769
770 case AVC444_CHROMAv2:
771 return neon_ChromaV2ToYUV444(pSrc, srcStep, nWidth, nHeight, pDst, dstStep, roi);
772
773 default:
774 return -1;
775 }
776}
777#endif
778
/* Register the NEON-optimized YUV conversion routines on the primitives
 * table, or leave the table untouched when NEON intrinsics were not compiled
 * in. Also caches the generic primitives table used as fallback for pixel
 * formats the NEON paths do not support. */
void primitives_init_YUV_neon_int(primitives_t* WINPR_RESTRICT prims)
{
#if defined(NEON_INTRINSICS_ENABLED)
	generic = primitives_get_generic();
	WLog_VRB(PRIM_TAG, "NEON optimizations");
	/* Only these three entries have NEON implementations; all other members
	 * keep whatever was installed before. */
	prims->YUV420ToRGB_8u_P3AC4R = neon_YUV420ToRGB_8u_P3AC4R;
	prims->YUV444ToRGB_8u_P3AC4R = neon_YUV444ToRGB_8u_P3AC4R;
	prims->YUV420CombineToYUV444 = neon_YUV420CombineToYUV444;
#else
	WLog_VRB(PRIM_TAG, "undefined WITH_SIMD or neon intrinsics not available");
	WINPR_UNUSED(prims);
#endif
}