4:4:4 H.264 decoding support
[libav.git] / libavcodec / x86 / h264_idct.asm
CommitLineData
1d16a1cf
RB
1;*****************************************************************************
2;* MMX/SSE2-optimized H.264 iDCT
3;*****************************************************************************
4;* Copyright (C) 2004-2005 Michael Niedermayer, Loren Merritt
5;* Copyright (C) 2003-2008 x264 project
6;*
7;* Authors: Laurent Aimar <fenrir@via.ecp.fr>
8;* Loren Merritt <lorenm@u.washington.edu>
9;* Holger Lubitz <hal@duncan.ol.sub.de>
10;* Min Chen <chenm001.163.com>
11;*
2912e87a 12;* This file is part of Libav.
1d16a1cf 13;*
2912e87a 14;* Libav is free software; you can redistribute it and/or
1d16a1cf
RB
15;* modify it under the terms of the GNU Lesser General Public
16;* License as published by the Free Software Foundation; either
17;* version 2.1 of the License, or (at your option) any later version.
18;*
2912e87a 19;* Libav is distributed in the hope that it will be useful,
1d16a1cf
RB
20;* but WITHOUT ANY WARRANTY; without even the implied warranty of
21;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22;* Lesser General Public License for more details.
23;*
24;* You should have received a copy of the GNU Lesser General Public
2912e87a 25;* License along with Libav; if not, write to the Free Software
888fa31e 26;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
1d16a1cf
RB
27;*****************************************************************************
28
29%include "x86inc.asm"
30%include "x86util.asm"
31
; NOTE(review): the leading numbers and hash/initial lines throughout this dump
; are gitweb blame artifacts, not assembler tokens.
32SECTION_RODATA
33
34; FIXME this table is a duplicate from h264data.h, and will be removed once the tables from, h264 have been split
c9c49387
JGG
; scan8_mem: per-block byte offsets into the decoder's 8-wide scan8 grid.
; Loops below use it as  nnzc[ scan8[i] ]  (movzx from [scan8+r5], then [r4+r6])
; to test whether block i has any non-zero coefficients.
35scan8_mem: db 4+ 1*8, 5+ 1*8, 4+ 2*8, 5+ 2*8
36 db 6+ 1*8, 7+ 1*8, 6+ 2*8, 7+ 2*8
37 db 4+ 3*8, 5+ 3*8, 4+ 4*8, 5+ 4*8
38 db 6+ 3*8, 7+ 3*8, 6+ 4*8, 7+ 4*8
39 db 4+ 6*8, 5+ 6*8, 4+ 7*8, 5+ 7*8
40 db 6+ 6*8, 7+ 6*8, 6+ 7*8, 7+ 7*8
41 db 4+ 8*8, 5+ 8*8, 4+ 9*8, 5+ 9*8
42 db 6+ 8*8, 7+ 8*8, 6+ 9*8, 7+ 9*8
43 db 4+11*8, 5+11*8, 4+12*8, 5+12*8
44 db 6+11*8, 7+11*8, 6+12*8, 7+12*8
45 db 4+13*8, 5+13*8, 4+14*8, 5+14*8
46 db 6+13*8, 7+13*8, 6+14*8, 7+14*8
1d16a1cf
RB
; With PIC, callers load the table address into r11 (lea r11, [scan8_mem])
; and index through the register; otherwise index the symbol directly.
47%ifdef PIC
48%define scan8 r11
49%else
50%define scan8 scan8_mem
51%endif
52
; Shared rounding constants defined elsewhere in the library.
53cextern pw_32
19fb234e 54cextern pw_1
1d16a1cf
RB
55
56SECTION .text
57
; IDCT4_ADD: inverse-transform one 4x4 block of int16 coefficients and add the
; result to the destination pixels. Two IDCT4_1D passes with a 4x4 word
; transpose in between; +32 rounding bias (pw_32) is folded into row 0 before
; the second pass, and STORE_DIFFx2 shifts right by 6 when adding to dst.
; Clobbers m0-m7 and advances %1 by two rows. (blame-dump artifacts preserved)
58; %1=uint8_t *dst, %2=int16_t *block, %3=int stride
59%macro IDCT4_ADD 3
60 ; Load dct coeffs
61 movq m0, [%2]
62 movq m1, [%2+8]
63 movq m2, [%2+16]
64 movq m3, [%2+24]
65
d0005d34 66 IDCT4_1D w, 0, 1, 2, 3, 4, 5
1d16a1cf
RB
67 mova m6, [pw_32]
68 TRANSPOSE4x4W 0, 1, 2, 3, 4
69 paddw m0, m6
d0005d34 70 IDCT4_1D w, 0, 1, 2, 3, 4, 5
1d16a1cf
RB
71 pxor m7, m7
72
73 STORE_DIFFx2 m0, m1, m4, m5, m7, 6, %1, %3
74 lea %1, [%1+%3*2]
75 STORE_DIFFx2 m2, m3, m4, m5, m7, 6, %1, %3
76%endmacro
77
78INIT_MMX
79; ff_h264_idct_add_mmx(uint8_t *dst, int16_t *block, int stride)
348493db 80cglobal h264_idct_add_8_mmx, 3, 3, 0
1d16a1cf
RB
81 IDCT4_ADD r0, r1, r2
82 RET
83
; IDCT8_1D: one 1-D pass of the 8-point H.264 inverse transform.
; Rows/columns 1,2,3,5,6,7 are expected in m1,m2,m3,m5,m6,m7; %1 and %2 are
; memory operands holding rows 0 and 4 (see IDCT8_1D_FULL). Results end up
; in m0-m7 after the final register renumbering SWAP.
84%macro IDCT8_1D 2
85 mova m4, m5
86 mova m0, m1
87 psraw m4, 1
88 psraw m1, 1
89 paddw m4, m5
90 paddw m1, m0
91 paddw m4, m7
92 paddw m1, m5
93 psubw m4, m0
94 paddw m1, m3
95
96 psubw m0, m3
97 psubw m5, m3
98 paddw m0, m7
99 psubw m5, m7
100 psraw m3, 1
101 psraw m7, 1
102 psubw m0, m3
103 psubw m5, m7
104
105 mova m3, m4
106 mova m7, m1
107 psraw m1, 2
108 psraw m3, 2
109 paddw m3, m0
110 psraw m0, 2
111 paddw m1, m5
112 psraw m5, 2
113 psubw m0, m4
114 psubw m7, m5
115
116 mova m4, m2
117 mova m5, m6
118 psraw m4, 1
119 psraw m6, 1
120 psubw m4, m5
121 paddw m6, m2
122
123 mova m2, %1
124 mova m5, %2
d0005d34
DK
125 SUMSUB_BA w, 5, 2
126 SUMSUB_BA w, 6, 5
127 SUMSUB_BA w, 4, 2
128 SUMSUB_BA w, 7, 6
129 SUMSUB_BA w, 0, 4
130 SUMSUB_BA w, 3, 2
131 SUMSUB_BA w, 1, 5
348493db 132 SWAP 7, 6, 4, 5, 2, 3, 1, 0 ; 70315246 -> 01234567
1d16a1cf
RB
133%endmacro
134
; IDCT8_1D_FULL: load rows 1,2,3,5,6,7 of an 8x8 coefficient block from %1
; into registers, then run IDCT8_1D with rows 0 and 4 as memory operands.
135%macro IDCT8_1D_FULL 1
136 mova m7, [%1+112]
137 mova m6, [%1+ 96]
138 mova m5, [%1+ 80]
139 mova m3, [%1+ 48]
140 mova m2, [%1+ 32]
141 mova m1, [%1+ 16]
142 IDCT8_1D [%1], [%1+ 64]
143%endmacro
144
; First (horizontal) MMX pass: transform one 4-column half of the 8x8 block
; and store the transposed result into the %2 scratch buffer. MMX regs hold
; only 4 words, so the 8x8 is processed as two 4-wide halves.
145; %1=int16_t *block, %2=int16_t *dstblock
146%macro IDCT8_ADD_MMX_START 2
147 IDCT8_1D_FULL %1
148 mova [%1], m7
149 TRANSPOSE4x4W 0, 1, 2, 3, 7
150 mova m7, [%1]
151 mova [%2 ], m0
152 mova [%2+16], m1
153 mova [%2+32], m2
154 mova [%2+48], m3
155 TRANSPOSE4x4W 4, 5, 6, 7, 3
156 mova [%2+ 8], m4
157 mova [%2+24], m5
158 mova [%2+40], m6
159 mova [%2+56], m7
160%endmacro
161
; Second (vertical) MMX pass: transform the scratch half and add the result
; to dst via STORE_DIFFx2 (shift 6). m5-m7 are spilled to the scratch buffer
; to free registers for the first four output rows.
162; %1=uint8_t *dst, %2=int16_t *block, %3=int stride
163%macro IDCT8_ADD_MMX_END 3
164 IDCT8_1D_FULL %2
165 mova [%2 ], m5
166 mova [%2+16], m6
167 mova [%2+32], m7
168
169 pxor m7, m7
170 STORE_DIFFx2 m0, m1, m5, m6, m7, 6, %1, %3
171 lea %1, [%1+%3*2]
172 STORE_DIFFx2 m2, m3, m5, m6, m7, 6, %1, %3
173 mova m0, [%2 ]
174 mova m1, [%2+16]
175 mova m2, [%2+32]
176 lea %1, [%1+%3*2]
177 STORE_DIFFx2 m4, m0, m5, m6, m7, 6, %1, %3
178 lea %1, [%1+%3*2]
179 STORE_DIFFx2 m1, m2, m5, m6, m7, 6, %1, %3
180%endmacro
181
182INIT_MMX
183; ff_h264_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
348493db 184cglobal h264_idct8_add_8_mmx, 3, 4, 0
1d16a1cf
RB
; 128-byte stack scratch; the +4-(stack_offset&7) term keeps the buffer
; 8-byte aligned for mova. The +32 rounding is folded into the DC coeff.
185 %assign pad 128+4-(stack_offset&7)
186 SUB rsp, pad
187
188 add word [r1], 32
189 IDCT8_ADD_MMX_START r1 , rsp
190 IDCT8_ADD_MMX_START r1+8, rsp+64
191 lea r3, [r0+4]
192 IDCT8_ADD_MMX_END r0 , rsp, r2
193 IDCT8_ADD_MMX_END r3 , rsp+8, r2
194
195 ADD rsp, pad
196 RET
197
; IDCT8_ADD_SSE: full 8x8 IDCT+add in XMM registers. On x86-64 the transpose
; and second pass use m8/m9 as spill space; on x86-32 two rows round-trip
; through the coefficient buffer instead. %4 is a scratch GPR for stride*3.
198; %1=uint8_t *dst, %2=int16_t *block, %3=int stride
199%macro IDCT8_ADD_SSE 4
200 IDCT8_1D_FULL %2
201%ifdef ARCH_X86_64
202 TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
203%else
204 TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [%2], [%2+16]
205%endif
206 paddw m0, [pw_32]
207
208%ifndef ARCH_X86_64
209 mova [%2 ], m0
210 mova [%2+16], m4
211 IDCT8_1D [%2], [%2+ 16]
212 mova [%2 ], m6
213 mova [%2+16], m7
214%else
215 SWAP 0, 8
216 SWAP 4, 9
217 IDCT8_1D m8, m9
218 SWAP 6, 8
219 SWAP 7, 9
220%endif
221
222 pxor m7, m7
223 lea %4, [%3*3]
224 STORE_DIFF m0, m6, m7, [%1 ]
225 STORE_DIFF m1, m6, m7, [%1+%3 ]
226 STORE_DIFF m2, m6, m7, [%1+%3*2]
227 STORE_DIFF m3, m6, m7, [%1+%4 ]
228%ifndef ARCH_X86_64
229 mova m0, [%2 ]
230 mova m1, [%2+16]
231%else
232 SWAP 0, 8
233 SWAP 1, 9
234%endif
235 lea %1, [%1+%3*4]
236 STORE_DIFF m4, m6, m7, [%1 ]
237 STORE_DIFF m5, m6, m7, [%1+%3 ]
238 STORE_DIFF m0, m6, m7, [%1+%3*2]
239 STORE_DIFF m1, m6, m7, [%1+%4 ]
240%endmacro
241
242INIT_XMM
243; ff_h264_idct8_add_sse2(uint8_t *dst, int16_t *block, int stride)
348493db 244cglobal h264_idct8_add_8_sse2, 3, 4, 10
1d16a1cf
RB
245 IDCT8_ADD_SSE r0, r1, r2, r3
246 RET
247
; DC_ADD_MMX2_INIT: compute the rounded DC value ((dc+32)>>6), broadcast it
; to all words of m0, and build m1 = -m0; both are then saturate-packed to
; bytes so DC_ADD_MMX2_OP can add a positive DC with paddusb and a negative
; one with psubusb. 2-arg form loads dc from [%1]; 3-arg form takes dc in %3.
; The GPR used for dc is reused to hold stride*3 on exit.
248%macro DC_ADD_MMX2_INIT 2-3
249%if %0 == 2
250 movsx %1, word [%1]
251 add %1, 32
252 sar %1, 6
02b424d9 253 movd m0, %1d
1d16a1cf
RB
254 lea %1, [%2*3]
255%else
256 add %3, 32
257 sar %3, 6
02b424d9 258 movd m0, %3d
1d16a1cf
RB
259 lea %3, [%2*3]
260%endif
261 pshufw m0, m0, 0
262 pxor m1, m1
263 psubw m1, m0
264 packuswb m0, m0
265 packuswb m1, m1
266%endmacro
267
; DC_ADD_MMX2_OP: add the prepared DC (m0/m1) to 4 rows of pixels.
; %1 = load/store op (movh/mova/movq), %2 = dst, %3 = stride, %4 = stride*3.
348493db 268%macro DC_ADD_MMX2_OP 4
1d16a1cf
RB
269 %1 m2, [%2 ]
270 %1 m3, [%2+%3 ]
271 %1 m4, [%2+%3*2]
272 %1 m5, [%2+%4 ]
273 paddusb m2, m0
274 paddusb m3, m0
275 paddusb m4, m0
276 paddusb m5, m0
277 psubusb m2, m1
278 psubusb m3, m1
279 psubusb m4, m1
280 psubusb m5, m1
281 %1 [%2 ], m2
282 %1 [%2+%3 ], m3
283 %1 [%2+%3*2], m4
284 %1 [%2+%4 ], m5
285%endmacro
286
287INIT_MMX
288; ff_h264_idct_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
348493db 289cglobal h264_idct_dc_add_8_mmx2, 3, 3, 0
1d16a1cf
RB
290 DC_ADD_MMX2_INIT r1, r2
291 DC_ADD_MMX2_OP movh, r0, r2, r1
292 RET
293
; 8x8 DC-only variant: two 4-row passes, 8 bytes wide (mova = full MMX reg).
294; ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
348493db 295cglobal h264_idct8_dc_add_8_mmx2, 3, 3, 0
1d16a1cf
RB
296 DC_ADD_MMX2_INIT r1, r2
297 DC_ADD_MMX2_OP mova, r0, r2, r1
298 lea r0, [r0+r2*4]
299 DC_ADD_MMX2_OP mova, r0, r2, r1
300 RET
301
; Loop over the 16 luma 4x4 blocks: for each block i with nnzc[scan8[i]] != 0,
; run IDCT4_ADD at dst + block_offset[i]. r5 = block index, r2 advances 32
; bytes (16 coeffs) per block.
302; ff_h264_idct_add16_mmx(uint8_t *dst, const int *block_offset,
303; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
348493db 304cglobal h264_idct_add16_8_mmx, 5, 7, 0
1d16a1cf
RB
305 xor r5, r5
306%ifdef PIC
307 lea r11, [scan8_mem]
308%endif
309.nextblock
310 movzx r6, byte [scan8+r5]
311 movzx r6, byte [r4+r6]
312 test r6, r6
313 jz .skipblock
314 mov r6d, dword [r1+r5*4]
315 lea r6, [r0+r6]
316 IDCT4_ADD r6, r2, r3
317.skipblock
318 inc r5
319 add r2, 32
320 cmp r5, 16
321 jl .nextblock
322 REP_RET
323
; 8x8 variant: steps 4 blocks (one 8x8) per iteration, 128 coeff bytes each,
; using the two-half MMX path with a stack scratch buffer (see pad above).
324; ff_h264_idct8_add4_mmx(uint8_t *dst, const int *block_offset,
325; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
348493db 326cglobal h264_idct8_add4_8_mmx, 5, 7, 0
1d16a1cf
RB
327 %assign pad 128+4-(stack_offset&7)
328 SUB rsp, pad
329
330 xor r5, r5
331%ifdef PIC
332 lea r11, [scan8_mem]
333%endif
334.nextblock
335 movzx r6, byte [scan8+r5]
336 movzx r6, byte [r4+r6]
337 test r6, r6
338 jz .skipblock
339 mov r6d, dword [r1+r5*4]
340 lea r6, [r0+r6]
341 add word [r2], 32
342 IDCT8_ADD_MMX_START r2 , rsp
343 IDCT8_ADD_MMX_START r2+8, rsp+64
344 IDCT8_ADD_MMX_END r6 , rsp, r3
345 mov r6d, dword [r1+r5*4]
346 lea r6, [r0+r6+4]
347 IDCT8_ADD_MMX_END r6 , rsp+8, r3
348.skipblock
349 add r5, 4
350 add r2, 128
351 cmp r5, 16
352 jl .nextblock
353 ADD rsp, pad
354 RET
355
; mmx2 add16: like the mmx version, but blocks whose nnzc entry is exactly 1
; and whose DC is non-zero take the cheap DC-only path (DC_ADD_MMX2_*).
; On x86-32 the DC path borrows r1 as dst_reg and reloads it from the stack
; (r1m) afterwards; on x86-64 it uses r10 instead.
356; ff_h264_idct_add16_mmx2(uint8_t *dst, const int *block_offset,
357; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
348493db 358cglobal h264_idct_add16_8_mmx2, 5, 7, 0
1d16a1cf
RB
359 xor r5, r5
360%ifdef PIC
361 lea r11, [scan8_mem]
362%endif
363.nextblock
364 movzx r6, byte [scan8+r5]
365 movzx r6, byte [r4+r6]
366 test r6, r6
367 jz .skipblock
368 cmp r6, 1
369 jnz .no_dc
370 movsx r6, word [r2]
371 test r6, r6
372 jz .no_dc
373 DC_ADD_MMX2_INIT r2, r3, r6
374%ifdef ARCH_X86_64
375%define dst_reg r10
376%define dst_regd r10d
377%else
378%define dst_reg r1
379%define dst_regd r1d
380%endif
381 mov dst_regd, dword [r1+r5*4]
382 lea dst_reg, [r0+dst_reg]
383 DC_ADD_MMX2_OP movh, dst_reg, r3, r6
384%ifndef ARCH_X86_64
385 mov r1, r1m
386%endif
387 inc r5
388 add r2, 32
389 cmp r5, 16
390 jl .nextblock
391 REP_RET
392.no_dc
393 mov r6d, dword [r1+r5*4]
394 lea r6, [r0+r6]
395 IDCT4_ADD r6, r2, r3
396.skipblock
397 inc r5
398 add r2, 32
399 cmp r5, 16
400 jl .nextblock
401 REP_RET
402
; Intra variant: a block is processed when nnzc[scan8[i]] OR its DC coeff is
; non-zero (the `or r6w, word [r2]` merges both tests).
403; ff_h264_idct_add16intra_mmx(uint8_t *dst, const int *block_offset,
404; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
348493db 405cglobal h264_idct_add16intra_8_mmx, 5, 7, 0
1d16a1cf
RB
406 xor r5, r5
407%ifdef PIC
408 lea r11, [scan8_mem]
409%endif
410.nextblock
411 movzx r6, byte [scan8+r5]
412 movzx r6, byte [r4+r6]
413 or r6w, word [r2]
414 test r6, r6
415 jz .skipblock
416 mov r6d, dword [r1+r5*4]
417 lea r6, [r0+r6]
418 IDCT4_ADD r6, r2, r3
419.skipblock
420 inc r5
421 add r2, 32
422 cmp r5, 16
423 jl .nextblock
424 REP_RET
425
; mmx2 intra variant: full IDCT when nnzc is set; otherwise fall back to a
; DC-only add when the DC coefficient alone is non-zero.
426; ff_h264_idct_add16intra_mmx2(uint8_t *dst, const int *block_offset,
427; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
348493db 428cglobal h264_idct_add16intra_8_mmx2, 5, 7, 0
1d16a1cf
RB
429 xor r5, r5
430%ifdef PIC
431 lea r11, [scan8_mem]
432%endif
433.nextblock
434 movzx r6, byte [scan8+r5]
435 movzx r6, byte [r4+r6]
436 test r6, r6
437 jz .try_dc
438 mov r6d, dword [r1+r5*4]
439 lea r6, [r0+r6]
440 IDCT4_ADD r6, r2, r3
441 inc r5
442 add r2, 32
443 cmp r5, 16
444 jl .nextblock
445 REP_RET
446.try_dc
447 movsx r6, word [r2]
448 test r6, r6
449 jz .skipblock
450 DC_ADD_MMX2_INIT r2, r3, r6
451%ifdef ARCH_X86_64
452%define dst_reg r10
453%define dst_regd r10d
454%else
455%define dst_reg r1
456%define dst_regd r1d
457%endif
458 mov dst_regd, dword [r1+r5*4]
459 lea dst_reg, [r0+dst_reg]
460 DC_ADD_MMX2_OP movh, dst_reg, r3, r6
461%ifndef ARCH_X86_64
462 mov r1, r1m
463%endif
464.skipblock
465 inc r5
466 add r2, 32
467 cmp r5, 16
468 jl .nextblock
469 REP_RET
470
471; ff_h264_idct8_add4_mmx2(uint8_t *dst, const int *block_offset,
472; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
348493db 473cglobal h264_idct8_add4_8_mmx2, 5, 7, 0
1d16a1cf
RB
474 %assign pad 128+4-(stack_offset&7)
475 SUB rsp, pad
476
477 xor r5, r5
478%ifdef PIC
479 lea r11, [scan8_mem]
480%endif
481.nextblock
482 movzx r6, byte [scan8+r5]
483 movzx r6, byte [r4+r6]
484 test r6, r6
485 jz .skipblock
486 cmp r6, 1
487 jnz .no_dc
488 movsx r6, word [r2]
489 test r6, r6
490 jz .no_dc
491 DC_ADD_MMX2_INIT r2, r3, r6
492%ifdef ARCH_X86_64
493%define dst_reg r10
494%define dst_regd r10d
495%else
496%define dst_reg r1
497%define dst_regd r1d
498%endif
499 mov dst_regd, dword [r1+r5*4]
500 lea dst_reg, [r0+dst_reg]
501 DC_ADD_MMX2_OP mova, dst_reg, r3, r6
502 lea dst_reg, [dst_reg+r3*4]
503 DC_ADD_MMX2_OP mova, dst_reg, r3, r6
504%ifndef ARCH_X86_64
505 mov r1, r1m
506%endif
507 add r5, 4
508 add r2, 128
509 cmp r5, 16
510 jl .nextblock
511
512 ADD rsp, pad
513 RET
514.no_dc
515 mov r6d, dword [r1+r5*4]
516 lea r6, [r0+r6]
517 add word [r2], 32
518 IDCT8_ADD_MMX_START r2 , rsp
519 IDCT8_ADD_MMX_START r2+8, rsp+64
520 IDCT8_ADD_MMX_END r6 , rsp, r3
521 mov r6d, dword [r1+r5*4]
522 lea r6, [r0+r6+4]
523 IDCT8_ADD_MMX_END r6 , rsp+8, r3
524.skipblock
525 add r5, 4
526 add r2, 128
527 cmp r5, 16
528 jl .nextblock
529
530 ADD rsp, pad
531 RET
532
533INIT_XMM
534; ff_h264_idct8_add4_sse2(uint8_t *dst, const int *block_offset,
535; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
348493db 536cglobal h264_idct8_add4_8_sse2, 5, 7, 10
1d16a1cf
RB
537 xor r5, r5
538%ifdef PIC
539 lea r11, [scan8_mem]
540%endif
541.nextblock
542 movzx r6, byte [scan8+r5]
543 movzx r6, byte [r4+r6]
544 test r6, r6
545 jz .skipblock
546 cmp r6, 1
547 jnz .no_dc
548 movsx r6, word [r2]
549 test r6, r6
550 jz .no_dc
551INIT_MMX
552 DC_ADD_MMX2_INIT r2, r3, r6
553%ifdef ARCH_X86_64
554%define dst_reg r10
555%define dst_regd r10d
556%else
557%define dst_reg r1
558%define dst_regd r1d
559%endif
560 mov dst_regd, dword [r1+r5*4]
561 lea dst_reg, [r0+dst_reg]
562 DC_ADD_MMX2_OP mova, dst_reg, r3, r6
563 lea dst_reg, [dst_reg+r3*4]
564 DC_ADD_MMX2_OP mova, dst_reg, r3, r6
565%ifndef ARCH_X86_64
566 mov r1, r1m
567%endif
568 add r5, 4
569 add r2, 128
570 cmp r5, 16
571 jl .nextblock
572 REP_RET
573.no_dc
574INIT_XMM
575 mov dst_regd, dword [r1+r5*4]
576 lea dst_reg, [r0+dst_reg]
577 IDCT8_ADD_SSE dst_reg, r2, r3, r6
578%ifndef ARCH_X86_64
579 mov r1, r1m
580%endif
581.skipblock
582 add r5, 4
583 add r2, 128
584 cmp r5, 16
585 jl .nextblock
586 REP_RET
587
588INIT_MMX
; Chroma plane helper: processes 4 blocks (r5 counted until (r5 & 3) == 0),
; intra-style test (nnzc OR DC non-zero). The dest pointer is indirect:
; dst = dest[plane] + block_offset[r5], with dest[] reached through r10
; (x86-64) or the caller's r0m stack slot (x86-32).
589h264_idct_add8_mmx_plane:
590.nextblock
591 movzx r6, byte [scan8+r5]
592 movzx r6, byte [r4+r6]
593 or r6w, word [r2]
594 test r6, r6
595 jz .skipblock
596%ifdef ARCH_X86_64
597 mov r0d, dword [r1+r5*4]
598 add r0, [r10]
599%else
600 mov r0, r1m ; XXX r1m here is actually r0m of the calling func
601 mov r0, [r0]
602 add r0, dword [r1+r5*4]
603%endif
604 IDCT4_ADD r0, r2, r3
605.skipblock
606 inc r5
607 add r2, 32
608 test r5, 3
609 jnz .nextblock
610 rep ret
611
; Two calls, one per chroma plane: r5=16 / r2+=512 for U, then r5=32 /
; r2+=384 (net 896) for V; the dest[] pointer is stepped by gprsize between.
612; ff_h264_idct_add8_mmx(uint8_t **dest, const int *block_offset,
613; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
348493db 614cglobal h264_idct_add8_8_mmx, 5, 7, 0
1d16a1cf
RB
615 mov r5, 16
616 add r2, 512
617%ifdef PIC
618 lea r11, [scan8_mem]
619%endif
620%ifdef ARCH_X86_64
621 mov r10, r0
622%endif
623 call h264_idct_add8_mmx_plane
c9c49387
JGG
624 mov r5, 32
625 add r2, 384
1d16a1cf
RB
626%ifdef ARCH_X86_64
627 add r10, gprsize
628%else
629 add r0mp, gprsize
630%endif
631 call h264_idct_add8_mmx_plane
632 RET
633
; mmx2 plane helper: full IDCT when nnzc set, else DC-only add when DC != 0.
; NOTE(review): this label has no trailing ':' (NASM accepts it, but it is
; inconsistent with h264_idct_add8_mmx_plane above) — worth confirming.
634h264_idct_add8_mmx2_plane
635.nextblock
636 movzx r6, byte [scan8+r5]
637 movzx r6, byte [r4+r6]
638 test r6, r6
639 jz .try_dc
640%ifdef ARCH_X86_64
641 mov r0d, dword [r1+r5*4]
642 add r0, [r10]
643%else
644 mov r0, r1m ; XXX r1m here is actually r0m of the calling func
645 mov r0, [r0]
646 add r0, dword [r1+r5*4]
647%endif
648 IDCT4_ADD r0, r2, r3
649 inc r5
650 add r2, 32
651 test r5, 3
652 jnz .nextblock
653 rep ret
654.try_dc
655 movsx r6, word [r2]
656 test r6, r6
657 jz .skipblock
658 DC_ADD_MMX2_INIT r2, r3, r6
659%ifdef ARCH_X86_64
660 mov r0d, dword [r1+r5*4]
661 add r0, [r10]
662%else
663 mov r0, r1m ; XXX r1m here is actually r0m of the calling func
664 mov r0, [r0]
665 add r0, dword [r1+r5*4]
666%endif
667 DC_ADD_MMX2_OP movh, r0, r3, r6
668.skipblock
669 inc r5
670 add r2, 32
671 test r5, 3
672 jnz .nextblock
673 rep ret
674
675; ff_h264_idct_add8_mmx2(uint8_t **dest, const int *block_offset,
676; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
348493db 677cglobal h264_idct_add8_8_mmx2, 5, 7, 0
1d16a1cf
RB
678 mov r5, 16
679 add r2, 512
680%ifdef ARCH_X86_64
681 mov r10, r0
682%endif
683%ifdef PIC
684 lea r11, [scan8_mem]
685%endif
686 call h264_idct_add8_mmx2_plane
c9c49387
JGG
687 mov r5, 32
688 add r2, 384
1d16a1cf
RB
689%ifdef ARCH_X86_64
690 add r10, gprsize
691%else
692 add r0mp, gprsize
693%endif
694 call h264_idct_add8_mmx2_plane
695 RET
696
697INIT_MMX
; Dual-DC add: applies the DC coefficients of TWO adjacent 4x4 blocks
; ([r2] and [r2+32]) to an 8-pixel-wide, 4-row area in one pass; the lane
; shuffling builds 4 bytes of each DC side by side (see lane comments).
698; r0 = uint8_t *dst, r2 = int16_t *block, r3 = int stride, r6=clobbered
699h264_idct_dc_add8_mmx2:
700 movd m0, [r2 ] ; 0 0 X D
701 punpcklwd m0, [r2+32] ; x X d D
702 paddsw m0, [pw_32]
703 psraw m0, 6
704 punpcklwd m0, m0 ; d d D D
705 pxor m1, m1 ; 0 0 0 0
706 psubw m1, m0 ; -d-d-D-D
707 packuswb m0, m1 ; -d-d-D-D d d D D
708 pshufw m1, m0, 0xFA ; -d-d-d-d-D-D-D-D
709 punpcklwd m0, m0 ; d d d d D D D D
710 lea r6, [r3*3]
711 DC_ADD_MMX2_OP movq, r0, r3, r6
712 ret
713
714ALIGN 16
715INIT_XMM
; Transform-and-add for two 4x4 blocks at once: block A in the low halves,
; block B (at r2+32) in the high halves of xmm0-3, sharing the IDCT work.
716; r0 = uint8_t *dst (clobbered), r2 = int16_t *block, r3 = int stride
717x264_add8x4_idct_sse2:
718 movq m0, [r2+ 0]
719 movq m1, [r2+ 8]
720 movq m2, [r2+16]
721 movq m3, [r2+24]
722 movhps m0, [r2+32]
723 movhps m1, [r2+40]
724 movhps m2, [r2+48]
725 movhps m3, [r2+56]
d0005d34 726 IDCT4_1D w,0,1,2,3,4,5
1d16a1cf
RB
727 TRANSPOSE2x4x4W 0,1,2,3,4
728 paddw m0, [pw_32]
d0005d34 729 IDCT4_1D w,0,1,2,3,4,5
1d16a1cf
RB
730 pxor m7, m7
731 STORE_DIFFx2 m0, m1, m4, m5, m7, 6, r0, r3
732 lea r0, [r0+r3*2]
733 STORE_DIFFx2 m2, m3, m4, m5, m7, 6, r0, r3
734 ret
735
; One unrolled iteration over a 2-block pair: %1 = pair index (0..7),
; %2 = byte offset of its nnzc word inside the 6*8 cache (both blocks' flags
; tested with a single 16-bit load).
736%macro add16_sse2_cycle 2
737 movzx r0, word [r4+%2]
738 test r0, r0
739 jz .cycle%1end
740 mov r0d, dword [r1+%1*8]
741%ifdef ARCH_X86_64
742 add r0, r10
743%else
744 add r0, r0m
745%endif
746 call x264_add8x4_idct_sse2
747.cycle%1end
748%if %1 < 7
749 add r2, 64
750%endif
751%endmacro
752
753; ff_h264_idct_add16_sse2(uint8_t *dst, const int *block_offset,
754; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
348493db 755cglobal h264_idct_add16_8_sse2, 5, 5, 8
1d16a1cf
RB
756%ifdef ARCH_X86_64
757 mov r10, r0
758%endif
759 ; unrolling of the loop leads to an average performance gain of
760 ; 20-25%
761 add16_sse2_cycle 0, 0xc
762 add16_sse2_cycle 1, 0x14
763 add16_sse2_cycle 2, 0xe
764 add16_sse2_cycle 3, 0x16
765 add16_sse2_cycle 4, 0x1c
766 add16_sse2_cycle 5, 0x24
767 add16_sse2_cycle 6, 0x1e
768 add16_sse2_cycle 7, 0x26
769 RET
770
ae112918
RB
; Intra per-pair iteration: full 2-block IDCT when either block's nnzc flag
; is set; otherwise, if either DC coefficient is non-zero (movsx then `or`
; sets ZF for the jz), do the dual-DC add instead.
771%macro add16intra_sse2_cycle 2
772 movzx r0, word [r4+%2]
1d16a1cf 773 test r0, r0
ae112918
RB
774 jz .try%1dc
775 mov r0d, dword [r1+%1*8]
1d16a1cf
RB
776%ifdef ARCH_X86_64
777 add r0, r10
778%else
779 add r0, r0m
780%endif
781 call x264_add8x4_idct_sse2
ae112918
RB
782 jmp .cycle%1end
783.try%1dc
1d16a1cf
RB
784 movsx r0, word [r2 ]
785 or r0w, word [r2+32]
ae112918
RB
786 jz .cycle%1end
787 mov r0d, dword [r1+%1*8]
1d16a1cf
RB
788%ifdef ARCH_X86_64
789 add r0, r10
790%else
791 add r0, r0m
792%endif
793 call h264_idct_dc_add8_mmx2
ae112918
RB
794.cycle%1end
795%if %1 < 7
1d16a1cf 796 add r2, 64
ae112918
RB
797%endif
798%endmacro
799
800; ff_h264_idct_add16intra_sse2(uint8_t *dst, const int *block_offset,
801; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
348493db 802cglobal h264_idct_add16intra_8_sse2, 5, 7, 8
ae112918
RB
803%ifdef ARCH_X86_64
804 mov r10, r0
805%endif
806 add16intra_sse2_cycle 0, 0xc
807 add16intra_sse2_cycle 1, 0x14
808 add16intra_sse2_cycle 2, 0xe
809 add16intra_sse2_cycle 3, 0x16
810 add16intra_sse2_cycle 4, 0x1c
811 add16intra_sse2_cycle 5, 0x24
812 add16intra_sse2_cycle 6, 0x1e
813 add16intra_sse2_cycle 7, 0x26
814 RET
1d16a1cf 815
; Chroma per-pair iteration: like add16intra_sse2_cycle, but dest is indirect
; (dest[plane] via [r10] / r0m) and the block_offset index is derived from
; the pair number: (%1&1)*8 + 64*(1+(%1>>1)).
4bca6774
RB
816%macro add8_sse2_cycle 2
817 movzx r0, word [r4+%2]
1d16a1cf 818 test r0, r0
4bca6774 819 jz .try%1dc
1d16a1cf 820%ifdef ARCH_X86_64
c9c49387 821 mov r0d, dword [r1+(%1&1)*8+64*(1+(%1>>1))]
1d16a1cf
RB
822 add r0, [r10]
823%else
4bca6774 824 mov r0, r0m
1d16a1cf 825 mov r0, [r0]
c9c49387 826 add r0, dword [r1+(%1&1)*8+64*(1+(%1>>1))]
1d16a1cf
RB
827%endif
828 call x264_add8x4_idct_sse2
4bca6774
RB
829 jmp .cycle%1end
830.try%1dc
1d16a1cf
RB
831 movsx r0, word [r2 ]
832 or r0w, word [r2+32]
4bca6774 833 jz .cycle%1end
1d16a1cf 834%ifdef ARCH_X86_64
c9c49387 835 mov r0d, dword [r1+(%1&1)*8+64*(1+(%1>>1))]
1d16a1cf
RB
836 add r0, [r10]
837%else
4bca6774 838 mov r0, r0m
1d16a1cf 839 mov r0, [r0]
c9c49387 840 add r0, dword [r1+(%1&1)*8+64*(1+(%1>>1))]
1d16a1cf
RB
841%endif
842 call h264_idct_dc_add8_mmx2
4bca6774 843.cycle%1end
c9c49387
JGG
844%if %1 == 1
845 add r2, 384+64
846%elif %1 < 3
1d16a1cf 847 add r2, 64
4bca6774
RB
848%endif
849%endmacro
1d16a1cf
RB
850
; Two pairs per chroma plane; the dest[] pointer advances by gprsize between
; planes, and r2 starts at the chroma coefficients (+512).
851; ff_h264_idct_add8_sse2(uint8_t **dest, const int *block_offset,
852; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
348493db 853cglobal h264_idct_add8_8_sse2, 5, 7, 8
1d16a1cf 854 add r2, 512
1d16a1cf
RB
855%ifdef ARCH_X86_64
856 mov r10, r0
857%endif
c9c49387
JGG
858 add8_sse2_cycle 0, 0x34
859 add8_sse2_cycle 1, 0x3c
1d16a1cf
RB
860%ifdef ARCH_X86_64
861 add r10, gprsize
862%else
863 add r0mp, gprsize
864%endif
c9c49387
JGG
865 add8_sse2_cycle 2, 0x5c
866 add8_sse2_cycle 3, 0x64
1d16a1cf 867 RET
19fb234e
JGG
868
869;void ff_h264_luma_dc_dequant_idct_mmx(DCTELEM *output, DCTELEM *input, int qmul)
870
; WALSH4_1D: one 1-D pass of the 4-point Hadamard (Walsh) transform used for
; the luma DC block; built from two SUMSUB butterfly layers plus a renumber.
871%macro WALSH4_1D 5
d0005d34
DK
872 SUMSUB_BADC w, %4, %3, %2, %1, %5
873 SUMSUB_BADC w, %4, %2, %3, %1, %5
19fb234e
JGG
874 SWAP %1, %4, %3
875%endmacro
876
; DEQUANT_MMX: multiply 8 words (%1,%2) by qmul (t3d, with rounding folded
; into its high half by the caller) via pmaddwd against interleaved pw_1,
; then shift right by %3 and repack to words.
877%macro DEQUANT_MMX 3
878 mova m7, [pw_1]
879 mova m4, %1
880 punpcklwd %1, m7
881 punpckhwd m4, m7
882 mova m5, %2
883 punpcklwd %2, m7
884 punpckhwd m5, m7
885 movd m7, t3d
886 punpckldq m7, m7
887 pmaddwd %1, m7
888 pmaddwd %2, m7
889 pmaddwd m4, m7
890 pmaddwd m5, m7
891 psrad %1, %3
892 psrad %2, %3
893 psrad m4, %3
894 psrad m5, %3
895 packssdw %1, m4
896 packssdw %2, m5
897%endmacro
898
; STORE_WORDS_MMX: scatter the 4 words of %1 to output slots %2..%5, each
; 32 bytes (16 DCTELEMs) apart — the DC position of each 4x4 sub-block.
899%macro STORE_WORDS_MMX 5
900 movd t0d, %1
901 psrlq %1, 32
902 movd t1d, %1
903 mov [t2+%2*32], t0w
904 mov [t2+%4*32], t1w
905 shr t0d, 16
906 shr t1d, 16
907 mov [t2+%3*32], t0w
908 mov [t2+%5*32], t1w
909%endmacro
910
911%macro DEQUANT_STORE_MMX 1
912 DEQUANT_MMX m0, m1, %1
913 STORE_WORDS_MMX m0, 0, 1, 4, 5
914 STORE_WORDS_MMX m1, 2, 3, 6, 7
915
916 DEQUANT_MMX m2, m3, %1
917 STORE_WORDS_MMX m2, 8, 9, 12, 13
918 STORE_WORDS_MMX m3, 10, 11, 14, 15
919%endmacro
920
; SSE variant of the word scatter: 8 words of %1 to slots %2..%9 (stride 32B).
921%macro STORE_WORDS_SSE 9
922 movd t0d, %1
923 psrldq %1, 4
924 movd t1d, %1
925 psrldq %1, 4
926 mov [t2+%2*32], t0w
927 mov [t2+%4*32], t1w
928 shr t0d, 16
929 shr t1d, 16
930 mov [t2+%3*32], t0w
931 mov [t2+%5*32], t1w
932 movd t0d, %1
933 psrldq %1, 4
934 movd t1d, %1
935 mov [t2+%6*32], t0w
936 mov [t2+%8*32], t1w
937 shr t0d, 16
938 shr t1d, 16
939 mov [t2+%7*32], t0w
940 mov [t2+%9*32], t1w
941%endmacro
942
; SSE2 dequant+store: widen the four MMX rows to XMM, multiply by the
; broadcast qmul, shift by %1 and scatter all 16 DC values.
943%macro DEQUANT_STORE_SSE2 1
944 movd xmm4, t3d
945 movq xmm5, [pw_1]
946 pshufd xmm4, xmm4, 0
947 movq2dq xmm0, m0
948 movq2dq xmm1, m1
949 movq2dq xmm2, m2
950 movq2dq xmm3, m3
951 punpcklwd xmm0, xmm5
952 punpcklwd xmm1, xmm5
953 punpcklwd xmm2, xmm5
954 punpcklwd xmm3, xmm5
955 pmaddwd xmm0, xmm4
956 pmaddwd xmm1, xmm4
957 pmaddwd xmm2, xmm4
958 pmaddwd xmm3, xmm4
959 psrad xmm0, %1
960 psrad xmm1, %1
961 psrad xmm2, %1
962 psrad xmm3, %1
963 packssdw xmm0, xmm1
964 packssdw xmm2, xmm3
965 STORE_WORDS_SSE xmm0, 0, 1, 4, 5, 2, 3, 6, 7
966 STORE_WORDS_SSE xmm2, 8, 9, 12, 13, 10, 11, 14, 15
967%endmacro
968
; IDCT_DC_DEQUANT: inverse 4x4 Hadamard on the luma DC block, then dequantize
; by qmul and scatter into the per-block DC slots. The 128<<16 added to t3d
; is the rounding term picked up by pmaddwd's second lane (pw_1).
; .big_qmul pre-shifts qmul (bsr-clamped to <=7) so the 16-bit multiply
; cannot overflow when qmul > 32767, and shifts less afterwards.
969%macro IDCT_DC_DEQUANT 2
970cglobal h264_luma_dc_dequant_idct_%1, 3,4,%2
971 movq m3, [r1+24]
972 movq m2, [r1+16]
973 movq m1, [r1+ 8]
974 movq m0, [r1+ 0]
975 WALSH4_1D 0,1,2,3,4
976 TRANSPOSE4x4W 0,1,2,3,4
977 WALSH4_1D 0,1,2,3,4
978
979; shift, tmp, output, qmul
980%ifdef WIN64
981 DECLARE_REG_TMP 0,3,1,2
982 ; we can't avoid this, because r0 is the shift register (ecx) on win64
983 xchg r0, t2
984%elifdef ARCH_X86_64
985 DECLARE_REG_TMP 3,1,0,2
986%else
987 DECLARE_REG_TMP 1,3,0,2
988%endif
989
990 cmp t3d, 32767
991 jg .big_qmul
992 add t3d, 128 << 16
993%ifidn %1,mmx
994 DEQUANT_STORE_MMX 8
995%else
996 DEQUANT_STORE_SSE2 8
997%endif
998 RET
999.big_qmul:
1000 bsr t0d, t3d
1001 add t3d, 128 << 16
1002 mov t1d, 7
1003 cmp t0d, t1d
1004 cmovg t0d, t1d
1005 inc t1d
1006 shr t3d, t0b
1007 sub t1d, t0d
1008%ifidn %1,mmx
1009 movd m6, t1d
1010 DEQUANT_STORE_MMX m6
1011%else
1012 movd xmm6, t1d
1013 DEQUANT_STORE_SSE2 xmm6
1014%endif
1015 RET
1016%endmacro
1017
1018INIT_MMX
1019IDCT_DC_DEQUANT mmx, 0
1020IDCT_DC_DEQUANT sse2, 7