2 * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
3 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 * H.264 / AVC / MPEG4 part10 codec.
24 * @author Michael Niedermayer <michaelni@gmx.at>
30 #include "mpegvideo.h"
39 #define interlaced_dct interlaced_dct_is_a_bad_name
40 #define mb_intra mb_intra_isnt_initalized_see_mb_type
42 #define LUMA_DC_BLOCK_INDEX 25
43 #define CHROMA_DC_BLOCK_INDEX 26
45 #define CHROMA_DC_COEFF_TOKEN_VLC_BITS 8
46 #define COEFF_TOKEN_VLC_BITS 8
47 #define TOTAL_ZEROS_VLC_BITS 9
48 #define CHROMA_DC_TOTAL_ZEROS_VLC_BITS 3
49 #define RUN_VLC_BITS 3
50 #define RUN7_VLC_BITS 6
52 #define MAX_SPS_COUNT 32
53 #define MAX_PPS_COUNT 256
55 #define MAX_MMCO_COUNT 66
58 * Sequence parameter set
64 int log2_max_frame_num
; ///< log2_max_frame_num_minus4 + 4
65 int poc_type
; ///< pic_order_cnt_type
66 int log2_max_poc_lsb
; ///< log2_max_pic_order_cnt_lsb_minus4
67 int delta_pic_order_always_zero_flag
;
68 int offset_for_non_ref_pic
;
69 int offset_for_top_to_bottom_field
;
70 int poc_cycle_length
; ///< num_ref_frames_in_pic_order_cnt_cycle
71 int ref_frame_count
; ///< num_ref_frames
72 int gaps_in_frame_num_allowed_flag
;
73 int mb_width
; ///< frame_width_in_mbs_minus1 + 1
74 int mb_height
; ///< frame_height_in_mbs_minus1 + 1
75 int frame_mbs_only_flag
;
76 int mb_aff
; ///<mb_adaptive_frame_field_flag
77 int direct_8x8_inference_flag
;
78 int crop
; ///< frame_cropping_flag
79 int crop_left
; ///< frame_cropping_rect_left_offset
80 int crop_right
; ///< frame_cropping_rect_right_offset
81 int crop_top
; ///< frame_cropping_rect_top_offset
82 int crop_bottom
; ///< frame_cropping_rect_bottom_offset
83 int vui_parameters_present_flag
;
85 int timing_info_present_flag
;
86 uint32_t num_units_in_tick
;
88 int fixed_frame_rate_flag
;
89 short offset_for_ref_frame
[256]; //FIXME dyn aloc?
90 int bitstream_restriction_flag
;
91 int num_reorder_frames
;
95 * Picture parameter set
99 int cabac
; ///< entropy_coding_mode_flag
100 int pic_order_present
; ///< pic_order_present_flag
101 int slice_group_count
; ///< num_slice_groups_minus1 + 1
102 int mb_slice_group_map_type
;
103 int ref_count
[2]; ///< num_ref_idx_l0/1_active_minus1 + 1
104 int weighted_pred
; ///< weighted_pred_flag
105 int weighted_bipred_idc
;
106 int init_qp
; ///< pic_init_qp_minus26 + 26
107 int init_qs
; ///< pic_init_qs_minus26 + 26
108 int chroma_qp_index_offset
;
109 int deblocking_filter_parameters_present
; ///< deblocking_filter_parameters_present_flag
110 int constrained_intra_pred
; ///< constrained_intra_pred_flag
111 int redundant_pic_cnt_present
; ///< redundant_pic_cnt_present_flag
115 * Memory management control operation opcode.
117 typedef enum MMCOOpcode
{
128 * Memory management control operation.
139 typedef struct H264Context
{
147 #define NAL_IDR_SLICE 5
151 #define NAL_PICTURE_DELIMITER 9
152 #define NAL_FILTER_DATA 10
153 uint8_t *rbsp_buffer
;
154 int rbsp_buffer_size
;
157 * Used to parse AVC variant of h264
159 int is_avc
; ///< this flag is != 0 if codec is avc1
160 int got_avcC
; ///< flag used to parse avcC data only once
161 int nal_length_size
; ///< Number of bytes used for nal length (1, 2 or 4)
165 int prev_mb_skipped
; //FIXME remove (IMHO not used)
168 int chroma_pred_mode
;
169 int intra16x16_pred_mode
;
174 int8_t intra4x4_pred_mode_cache
[5*8];
175 int8_t (*intra4x4_pred_mode
)[8];
176 void (*pred4x4
[9+3])(uint8_t *src
, uint8_t *topright
, int stride
);//FIXME move to dsp?
177 void (*pred8x8
[4+3])(uint8_t *src
, int stride
);
178 void (*pred16x16
[4+3])(uint8_t *src
, int stride
);
179 unsigned int topleft_samples_available
;
180 unsigned int top_samples_available
;
181 unsigned int topright_samples_available
;
182 unsigned int left_samples_available
;
183 uint8_t (*top_borders
[2])[16+2*8];
184 uint8_t left_border
[2*(17+2*9)];
187 * non zero coeff count cache.
188 * is 64 if not available.
190 uint8_t non_zero_count_cache
[6*8] __align8
;
191 uint8_t (*non_zero_count
)[16];
194 * Motion vector cache.
196 int16_t mv_cache
[2][5*8][2] __align8
;
197 int8_t ref_cache
[2][5*8] __align8
;
198 #define LIST_NOT_USED -1 //FIXME rename?
199 #define PART_NOT_AVAILABLE -2
202 * is 1 if the specific list MV&references are set to 0,0,-2.
204 int mv_cache_clean
[2];
207 * block_offset[ 0..23] for frame macroblocks
208 * block_offset[24..47] for field macroblocks
210 int block_offset
[2*(16+8)];
212 uint32_t *mb2b_xy
; //FIXME are these 4 a good idea?
214 int b_stride
; //FIXME use s->b4_stride
220 int unknown_svq3_flag
;
221 int next_slice_index
;
223 SPS sps_buffer
[MAX_SPS_COUNT
];
224 SPS sps
; ///< current sps
226 PPS pps_buffer
[MAX_PPS_COUNT
];
230 PPS pps
; //FIXME move to Picture perhaps? (->no) do we need that?
233 uint8_t *slice_table_base
;
234 uint8_t *slice_table
; ///< slice_table_base + mb_stride + 1
236 int slice_type_fixed
;
238 //interlacing specific flags
240 int mb_field_decoding_flag
;
247 int delta_poc_bottom
;
250 int prev_poc_msb
; ///< poc_msb of the last reference pic for POC type 0
251 int prev_poc_lsb
; ///< poc_lsb of the last reference pic for POC type 0
252 int frame_num_offset
; ///< for POC type 2
253 int prev_frame_num_offset
; ///< for POC type 2
254 int prev_frame_num
; ///< frame_num of the last pic for POC type 1/2
257 * frame_num for frames or 2*frame_num for field pics.
262 * max_frame_num or 2*max_frame_num for field pics.
266 //Weighted pred stuff
268 int use_weight_chroma
;
269 int luma_log2_weight_denom
;
270 int chroma_log2_weight_denom
;
271 int luma_weight
[2][16];
272 int luma_offset
[2][16];
273 int chroma_weight
[2][16][2];
274 int chroma_offset
[2][16][2];
275 int implicit_weight
[16][16];
278 int deblocking_filter
; ///< disable_deblocking_filter_idc with 1<->0
279 int slice_alpha_c0_offset
;
280 int slice_beta_offset
;
282 int redundant_pic_count
;
284 int direct_spatial_mv_pred
;
285 int dist_scale_factor
[16];
286 int map_col_to_list0
[2][16];
289 * num_ref_idx_l0/1_active_minus1 + 1
291 int ref_count
[2];// FIXME split for AFF
292 Picture
*short_ref
[32];
293 Picture
*long_ref
[32];
294 Picture default_ref_list
[2][32];
295 Picture ref_list
[2][32]; //FIXME size?
296 Picture field_ref_list
[2][32]; //FIXME size?
297 Picture
*delayed_pic
[16]; //FIXME size?
298 Picture
*delayed_output_pic
;
301 * memory management control operations buffer.
303 MMCO mmco
[MAX_MMCO_COUNT
];
306 int long_ref_count
; ///< number of actual long term references
307 int short_ref_count
; ///< number of actual short term references
310 GetBitContext intra_gb
;
311 GetBitContext inter_gb
;
312 GetBitContext
*intra_gb_ptr
;
313 GetBitContext
*inter_gb_ptr
;
315 DCTELEM mb
[16*24] __align8
;
321 uint8_t cabac_state
[399];
324 /* 0x100 -> non null luma_dc, 0x80/0x40 -> non null chroma_dc (cb/cr), 0x?0 -> chroma_cbp(0,1,2), 0x0? luma_cbp */
328 /* chroma_pred_mode for i4x4 or i16x16, else 0 */
329 uint8_t *chroma_pred_mode_table
;
330 int last_qscale_diff
;
331 int16_t (*mvd_table
[2])[2];
332 int16_t mvd_cache
[2][5*8][2] __align8
;
333 uint8_t *direct_table
;
334 uint8_t direct_cache
[5*8];
336 uint8_t zigzag_scan
[16];
337 uint8_t field_scan
[16];
340 static VLC coeff_token_vlc
[4];
341 static VLC chroma_dc_coeff_token_vlc
;
343 static VLC total_zeros_vlc
[15];
344 static VLC chroma_dc_total_zeros_vlc
[3];
346 static VLC run_vlc
[6];
349 static void svq3_luma_dc_dequant_idct_c(DCTELEM
*block
, int qp
);
350 static void svq3_add_idct_c(uint8_t *dst
, DCTELEM
*block
, int stride
, int qp
, int dc
);
351 static void filter_mb( H264Context
*h
, int mb_x
, int mb_y
, uint8_t *img_y
, uint8_t *img_cb
, uint8_t *img_cr
, unsigned int linesize
, unsigned int uvlinesize
);
353 static inline uint32_t pack16to32(int a
, int b
){
354 #ifdef WORDS_BIGENDIAN
355 return (b
&0xFFFF) + (a
<<16);
357 return (a
&0xFFFF) + (b
<<16);
363 * @param h height of the rectangle, should be a constant
364 * @param w width of the rectangle, should be a constant
365 * @param size the size of val (1 or 4), should be a constant
367 static inline void fill_rectangle(void *vp
, int w
, int h
, int stride
, uint32_t val
, int size
){ //FIXME ensure this IS inlined
368 uint8_t *p
= (uint8_t*)vp
;
369 assert(size
==1 || size
==4);
374 assert((((int)vp
)&(FFMIN(w
, STRIDE_ALIGN
)-1)) == 0);
375 assert((stride
&(w
-1))==0);
376 //FIXME check what gcc generates for 64 bit on x86 and possibly write a 32 bit ver of it
379 *(uint16_t*)(p
+ stride
)= size
==4 ? val
: val
*0x0101;
380 }else if(w
==2 && h
==4){
381 *(uint16_t*)(p
+ 0*stride
)=
382 *(uint16_t*)(p
+ 1*stride
)=
383 *(uint16_t*)(p
+ 2*stride
)=
384 *(uint16_t*)(p
+ 3*stride
)= size
==4 ? val
: val
*0x0101;
385 }else if(w
==4 && h
==1){
386 *(uint32_t*)(p
+ 0*stride
)= size
==4 ? val
: val
*0x01010101;
387 }else if(w
==4 && h
==2){
388 *(uint32_t*)(p
+ 0*stride
)=
389 *(uint32_t*)(p
+ 1*stride
)= size
==4 ? val
: val
*0x01010101;
390 }else if(w
==4 && h
==4){
391 *(uint32_t*)(p
+ 0*stride
)=
392 *(uint32_t*)(p
+ 1*stride
)=
393 *(uint32_t*)(p
+ 2*stride
)=
394 *(uint32_t*)(p
+ 3*stride
)= size
==4 ? val
: val
*0x01010101;
395 }else if(w
==8 && h
==1){
397 *(uint32_t*)(p
+ 4)= size
==4 ? val
: val
*0x01010101;
398 }else if(w
==8 && h
==2){
399 *(uint32_t*)(p
+ 0 + 0*stride
)=
400 *(uint32_t*)(p
+ 4 + 0*stride
)=
401 *(uint32_t*)(p
+ 0 + 1*stride
)=
402 *(uint32_t*)(p
+ 4 + 1*stride
)= size
==4 ? val
: val
*0x01010101;
403 }else if(w
==8 && h
==4){
404 *(uint64_t*)(p
+ 0*stride
)=
405 *(uint64_t*)(p
+ 1*stride
)=
406 *(uint64_t*)(p
+ 2*stride
)=
407 *(uint64_t*)(p
+ 3*stride
)= size
==4 ? val
*0x0100000001ULL
: val
*0x0101010101010101ULL
;
408 }else if(w
==16 && h
==2){
409 *(uint64_t*)(p
+ 0+0*stride
)=
410 *(uint64_t*)(p
+ 8+0*stride
)=
411 *(uint64_t*)(p
+ 0+1*stride
)=
412 *(uint64_t*)(p
+ 8+1*stride
)= size
==4 ? val
*0x0100000001ULL
: val
*0x0101010101010101ULL
;
413 }else if(w
==16 && h
==4){
414 *(uint64_t*)(p
+ 0+0*stride
)=
415 *(uint64_t*)(p
+ 8+0*stride
)=
416 *(uint64_t*)(p
+ 0+1*stride
)=
417 *(uint64_t*)(p
+ 8+1*stride
)=
418 *(uint64_t*)(p
+ 0+2*stride
)=
419 *(uint64_t*)(p
+ 8+2*stride
)=
420 *(uint64_t*)(p
+ 0+3*stride
)=
421 *(uint64_t*)(p
+ 8+3*stride
)= size
==4 ? val
*0x0100000001ULL
: val
*0x0101010101010101ULL
;
426 static inline void fill_caches(H264Context
*h
, int mb_type
, int for_deblock
){
427 MpegEncContext
* const s
= &h
->s
;
428 const int mb_xy
= s
->mb_x
+ s
->mb_y
*s
->mb_stride
;
429 int topleft_xy
, top_xy
, topright_xy
, left_xy
[2];
430 int topleft_type
, top_type
, topright_type
, left_type
[2];
434 //FIXME deblocking can skip fill_caches much of the time with multiple slices too.
435 // the actual condition is whether we're on the edge of a slice,
436 // and even then the intra and nnz parts are unnecessary.
437 if(for_deblock
&& h
->slice_num
== 1)
440 //wow what a mess, why didn't they simplify the interlacing&intra stuff, i can't imagine that these complex rules are worth it
442 top_xy
= mb_xy
- s
->mb_stride
;
443 topleft_xy
= top_xy
- 1;
444 topright_xy
= top_xy
+ 1;
445 left_xy
[1] = left_xy
[0] = mb_xy
-1;
455 const int pair_xy
= s
->mb_x
+ (s
->mb_y
& ~1)*s
->mb_stride
;
456 const int top_pair_xy
= pair_xy
- s
->mb_stride
;
457 const int topleft_pair_xy
= top_pair_xy
- 1;
458 const int topright_pair_xy
= top_pair_xy
+ 1;
459 const int topleft_mb_frame_flag
= !IS_INTERLACED(s
->current_picture
.mb_type
[topleft_pair_xy
]);
460 const int top_mb_frame_flag
= !IS_INTERLACED(s
->current_picture
.mb_type
[top_pair_xy
]);
461 const int topright_mb_frame_flag
= !IS_INTERLACED(s
->current_picture
.mb_type
[topright_pair_xy
]);
462 const int left_mb_frame_flag
= !IS_INTERLACED(s
->current_picture
.mb_type
[pair_xy
-1]);
463 const int curr_mb_frame_flag
= !IS_INTERLACED(mb_type
);
464 const int bottom
= (s
->mb_y
& 1);
465 tprintf("fill_caches: curr_mb_frame_flag:%d, left_mb_frame_flag:%d, topleft_mb_frame_flag:%d, top_mb_frame_flag:%d, topright_mb_frame_flag:%d\n", curr_mb_frame_flag
, left_mb_frame_flag
, topleft_mb_frame_flag
, top_mb_frame_flag
, topright_mb_frame_flag
);
467 ?
!curr_mb_frame_flag
// bottom macroblock
468 : (!curr_mb_frame_flag
&& !top_mb_frame_flag
) // top macroblock
470 top_xy
-= s
->mb_stride
;
473 ?
!curr_mb_frame_flag
// bottom macroblock
474 : (!curr_mb_frame_flag
&& !topleft_mb_frame_flag
) // top macroblock
476 topleft_xy
-= s
->mb_stride
;
479 ?
!curr_mb_frame_flag
// bottom macroblock
480 : (!curr_mb_frame_flag
&& !topright_mb_frame_flag
) // top macroblock
482 topright_xy
-= s
->mb_stride
;
484 if (left_mb_frame_flag
!= curr_mb_frame_flag
) {
485 left_xy
[1] = left_xy
[0] = pair_xy
- 1;
486 if (curr_mb_frame_flag
) {
507 left_xy
[1] += s
->mb_stride
;
520 h
->top_mb_xy
= top_xy
;
521 h
->left_mb_xy
[0] = left_xy
[0];
522 h
->left_mb_xy
[1] = left_xy
[1];
524 topleft_type
= h
->slice_table
[topleft_xy
] < 255 ? s
->current_picture
.mb_type
[topleft_xy
] : 0;
525 top_type
= h
->slice_table
[top_xy
] < 255 ? s
->current_picture
.mb_type
[top_xy
] : 0;
526 topright_type
= h
->slice_table
[topright_xy
] < 255 ? s
->current_picture
.mb_type
[topright_xy
]: 0;
527 left_type
[0] = h
->slice_table
[left_xy
[0] ] < 255 ? s
->current_picture
.mb_type
[left_xy
[0]] : 0;
528 left_type
[1] = h
->slice_table
[left_xy
[1] ] < 255 ? s
->current_picture
.mb_type
[left_xy
[1]] : 0;
530 topleft_type
= h
->slice_table
[topleft_xy
] == h
->slice_num ? s
->current_picture
.mb_type
[topleft_xy
] : 0;
531 top_type
= h
->slice_table
[top_xy
] == h
->slice_num ? s
->current_picture
.mb_type
[top_xy
] : 0;
532 topright_type
= h
->slice_table
[topright_xy
] == h
->slice_num ? s
->current_picture
.mb_type
[topright_xy
]: 0;
533 left_type
[0] = h
->slice_table
[left_xy
[0] ] == h
->slice_num ? s
->current_picture
.mb_type
[left_xy
[0]] : 0;
534 left_type
[1] = h
->slice_table
[left_xy
[1] ] == h
->slice_num ? s
->current_picture
.mb_type
[left_xy
[1]] : 0;
537 if(IS_INTRA(mb_type
)){
538 h
->topleft_samples_available
=
539 h
->top_samples_available
=
540 h
->left_samples_available
= 0xFFFF;
541 h
->topright_samples_available
= 0xEEEA;
543 if(!IS_INTRA(top_type
) && (top_type
==0 || h
->pps
.constrained_intra_pred
)){
544 h
->topleft_samples_available
= 0xB3FF;
545 h
->top_samples_available
= 0x33FF;
546 h
->topright_samples_available
= 0x26EA;
549 if(!IS_INTRA(left_type
[i
]) && (left_type
[i
]==0 || h
->pps
.constrained_intra_pred
)){
550 h
->topleft_samples_available
&= 0xDF5F;
551 h
->left_samples_available
&= 0x5F5F;
555 if(!IS_INTRA(topleft_type
) && (topleft_type
==0 || h
->pps
.constrained_intra_pred
))
556 h
->topleft_samples_available
&= 0x7FFF;
558 if(!IS_INTRA(topright_type
) && (topright_type
==0 || h
->pps
.constrained_intra_pred
))
559 h
->topright_samples_available
&= 0xFBFF;
561 if(IS_INTRA4x4(mb_type
)){
562 if(IS_INTRA4x4(top_type
)){
563 h
->intra4x4_pred_mode_cache
[4+8*0]= h
->intra4x4_pred_mode
[top_xy
][4];
564 h
->intra4x4_pred_mode_cache
[5+8*0]= h
->intra4x4_pred_mode
[top_xy
][5];
565 h
->intra4x4_pred_mode_cache
[6+8*0]= h
->intra4x4_pred_mode
[top_xy
][6];
566 h
->intra4x4_pred_mode_cache
[7+8*0]= h
->intra4x4_pred_mode
[top_xy
][3];
569 if(!top_type
|| (IS_INTER(top_type
) && h
->pps
.constrained_intra_pred
))
574 h
->intra4x4_pred_mode_cache
[4+8*0]=
575 h
->intra4x4_pred_mode_cache
[5+8*0]=
576 h
->intra4x4_pred_mode_cache
[6+8*0]=
577 h
->intra4x4_pred_mode_cache
[7+8*0]= pred
;
580 if(IS_INTRA4x4(left_type
[i
])){
581 h
->intra4x4_pred_mode_cache
[3+8*1 + 2*8*i
]= h
->intra4x4_pred_mode
[left_xy
[i
]][left_block
[0+2*i
]];
582 h
->intra4x4_pred_mode_cache
[3+8*2 + 2*8*i
]= h
->intra4x4_pred_mode
[left_xy
[i
]][left_block
[1+2*i
]];
585 if(!left_type
[i
] || (IS_INTER(left_type
[i
]) && h
->pps
.constrained_intra_pred
))
590 h
->intra4x4_pred_mode_cache
[3+8*1 + 2*8*i
]=
591 h
->intra4x4_pred_mode_cache
[3+8*2 + 2*8*i
]= pred
;
606 //FIXME constraint_intra_pred & partitioning & nnz (lets hope this is just a typo in the spec)
608 h
->non_zero_count_cache
[4+8*0]= h
->non_zero_count
[top_xy
][4];
609 h
->non_zero_count_cache
[5+8*0]= h
->non_zero_count
[top_xy
][5];
610 h
->non_zero_count_cache
[6+8*0]= h
->non_zero_count
[top_xy
][6];
611 h
->non_zero_count_cache
[7+8*0]= h
->non_zero_count
[top_xy
][3];
613 h
->non_zero_count_cache
[1+8*0]= h
->non_zero_count
[top_xy
][9];
614 h
->non_zero_count_cache
[2+8*0]= h
->non_zero_count
[top_xy
][8];
616 h
->non_zero_count_cache
[1+8*3]= h
->non_zero_count
[top_xy
][12];
617 h
->non_zero_count_cache
[2+8*3]= h
->non_zero_count
[top_xy
][11];
620 h
->non_zero_count_cache
[4+8*0]=
621 h
->non_zero_count_cache
[5+8*0]=
622 h
->non_zero_count_cache
[6+8*0]=
623 h
->non_zero_count_cache
[7+8*0]=
625 h
->non_zero_count_cache
[1+8*0]=
626 h
->non_zero_count_cache
[2+8*0]=
628 h
->non_zero_count_cache
[1+8*3]=
629 h
->non_zero_count_cache
[2+8*3]= h
->pps
.cabac
&& !IS_INTRA(mb_type
) ?
0 : 64;
633 for (i
=0; i
<2; i
++) {
635 h
->non_zero_count_cache
[3+8*1 + 2*8*i
]= h
->non_zero_count
[left_xy
[i
]][left_block
[0+2*i
]];
636 h
->non_zero_count_cache
[3+8*2 + 2*8*i
]= h
->non_zero_count
[left_xy
[i
]][left_block
[1+2*i
]];
637 h
->non_zero_count_cache
[0+8*1 + 8*i
]= h
->non_zero_count
[left_xy
[i
]][left_block
[4+2*i
]];
638 h
->non_zero_count_cache
[0+8*4 + 8*i
]= h
->non_zero_count
[left_xy
[i
]][left_block
[5+2*i
]];
640 h
->non_zero_count_cache
[3+8*1 + 2*8*i
]=
641 h
->non_zero_count_cache
[3+8*2 + 2*8*i
]=
642 h
->non_zero_count_cache
[0+8*1 + 8*i
]=
643 h
->non_zero_count_cache
[0+8*4 + 8*i
]= h
->pps
.cabac
&& !IS_INTRA(mb_type
) ?
0 : 64;
650 h
->top_cbp
= h
->cbp_table
[top_xy
];
651 } else if(IS_INTRA(mb_type
)) {
658 h
->left_cbp
= h
->cbp_table
[left_xy
[0]] & 0x1f0;
659 } else if(IS_INTRA(mb_type
)) {
665 h
->left_cbp
|= ((h
->cbp_table
[left_xy
[0]]>>((left_block
[0]&(~1))+1))&0x1) << 1;
668 h
->left_cbp
|= ((h
->cbp_table
[left_xy
[1]]>>((left_block
[2]&(~1))+1))&0x1) << 3;
673 //FIXME direct mb can skip much of this
674 if(IS_INTER(mb_type
) || IS_DIRECT(mb_type
)){
676 for(list
=0; list
<1+(h
->slice_type
==B_TYPE
); list
++){
677 if(!USES_LIST(mb_type
, list
) && !IS_DIRECT(mb_type
) && !h
->deblocking_filter
){
678 /*if(!h->mv_cache_clean[list]){
679 memset(h->mv_cache [list], 0, 8*5*2*sizeof(int16_t)); //FIXME clean only input? clean at all?
680 memset(h->ref_cache[list], PART_NOT_AVAILABLE, 8*5*sizeof(int8_t));
681 h->mv_cache_clean[list]= 1;
685 h
->mv_cache_clean
[list
]= 0;
687 if(IS_INTER(top_type
)){
688 const int b_xy
= h
->mb2b_xy
[top_xy
] + 3*h
->b_stride
;
689 const int b8_xy
= h
->mb2b8_xy
[top_xy
] + h
->b8_stride
;
690 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] + 0 - 1*8]= *(uint32_t*)s
->current_picture
.motion_val
[list
][b_xy
+ 0];
691 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] + 1 - 1*8]= *(uint32_t*)s
->current_picture
.motion_val
[list
][b_xy
+ 1];
692 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] + 2 - 1*8]= *(uint32_t*)s
->current_picture
.motion_val
[list
][b_xy
+ 2];
693 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] + 3 - 1*8]= *(uint32_t*)s
->current_picture
.motion_val
[list
][b_xy
+ 3];
694 h
->ref_cache
[list
][scan8
[0] + 0 - 1*8]=
695 h
->ref_cache
[list
][scan8
[0] + 1 - 1*8]= s
->current_picture
.ref_index
[list
][b8_xy
+ 0];
696 h
->ref_cache
[list
][scan8
[0] + 2 - 1*8]=
697 h
->ref_cache
[list
][scan8
[0] + 3 - 1*8]= s
->current_picture
.ref_index
[list
][b8_xy
+ 1];
699 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] + 0 - 1*8]=
700 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] + 1 - 1*8]=
701 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] + 2 - 1*8]=
702 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] + 3 - 1*8]= 0;
703 *(uint32_t*)&h
->ref_cache
[list
][scan8
[0] + 0 - 1*8]= ((top_type ? LIST_NOT_USED
: PART_NOT_AVAILABLE
)&0xFF)*0x01010101;
706 //FIXME unify cleanup or sth
707 if(IS_INTER(left_type
[0])){
708 const int b_xy
= h
->mb2b_xy
[left_xy
[0]] + 3;
709 const int b8_xy
= h
->mb2b8_xy
[left_xy
[0]] + 1;
710 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] - 1 + 0*8]= *(uint32_t*)s
->current_picture
.motion_val
[list
][b_xy
+ h
->b_stride
*left_block
[0]];
711 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] - 1 + 1*8]= *(uint32_t*)s
->current_picture
.motion_val
[list
][b_xy
+ h
->b_stride
*left_block
[1]];
712 h
->ref_cache
[list
][scan8
[0] - 1 + 0*8]=
713 h
->ref_cache
[list
][scan8
[0] - 1 + 1*8]= s
->current_picture
.ref_index
[list
][b8_xy
+ h
->b8_stride
*(left_block
[0]>>1)];
715 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] - 1 + 0*8]=
716 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] - 1 + 1*8]= 0;
717 h
->ref_cache
[list
][scan8
[0] - 1 + 0*8]=
718 h
->ref_cache
[list
][scan8
[0] - 1 + 1*8]= left_type
[0] ? LIST_NOT_USED
: PART_NOT_AVAILABLE
;
721 if(IS_INTER(left_type
[1])){
722 const int b_xy
= h
->mb2b_xy
[left_xy
[1]] + 3;
723 const int b8_xy
= h
->mb2b8_xy
[left_xy
[1]] + 1;
724 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] - 1 + 2*8]= *(uint32_t*)s
->current_picture
.motion_val
[list
][b_xy
+ h
->b_stride
*left_block
[2]];
725 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] - 1 + 3*8]= *(uint32_t*)s
->current_picture
.motion_val
[list
][b_xy
+ h
->b_stride
*left_block
[3]];
726 h
->ref_cache
[list
][scan8
[0] - 1 + 2*8]=
727 h
->ref_cache
[list
][scan8
[0] - 1 + 3*8]= s
->current_picture
.ref_index
[list
][b8_xy
+ h
->b8_stride
*(left_block
[2]>>1)];
729 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] - 1 + 2*8]=
730 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] - 1 + 3*8]= 0;
731 h
->ref_cache
[list
][scan8
[0] - 1 + 2*8]=
732 h
->ref_cache
[list
][scan8
[0] - 1 + 3*8]= left_type
[0] ? LIST_NOT_USED
: PART_NOT_AVAILABLE
;
733 assert((!left_type
[0]) == (!left_type
[1]));
736 if(for_deblock
|| (IS_DIRECT(mb_type
) && !h
->direct_spatial_mv_pred
))
739 if(IS_INTER(topleft_type
)){
740 const int b_xy
= h
->mb2b_xy
[topleft_xy
] + 3 + 3*h
->b_stride
;
741 const int b8_xy
= h
->mb2b8_xy
[topleft_xy
] + 1 + h
->b8_stride
;
742 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] - 1 - 1*8]= *(uint32_t*)s
->current_picture
.motion_val
[list
][b_xy
];
743 h
->ref_cache
[list
][scan8
[0] - 1 - 1*8]= s
->current_picture
.ref_index
[list
][b8_xy
];
745 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] - 1 - 1*8]= 0;
746 h
->ref_cache
[list
][scan8
[0] - 1 - 1*8]= topleft_type ? LIST_NOT_USED
: PART_NOT_AVAILABLE
;
749 if(IS_INTER(topright_type
)){
750 const int b_xy
= h
->mb2b_xy
[topright_xy
] + 3*h
->b_stride
;
751 const int b8_xy
= h
->mb2b8_xy
[topright_xy
] + h
->b8_stride
;
752 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] + 4 - 1*8]= *(uint32_t*)s
->current_picture
.motion_val
[list
][b_xy
];
753 h
->ref_cache
[list
][scan8
[0] + 4 - 1*8]= s
->current_picture
.ref_index
[list
][b8_xy
];
755 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] + 4 - 1*8]= 0;
756 h
->ref_cache
[list
][scan8
[0] + 4 - 1*8]= topright_type ? LIST_NOT_USED
: PART_NOT_AVAILABLE
;
760 h
->ref_cache
[list
][scan8
[5 ]+1] =
761 h
->ref_cache
[list
][scan8
[7 ]+1] =
762 h
->ref_cache
[list
][scan8
[13]+1] = //FIXME remove past 3 (init somewhere else)
763 h
->ref_cache
[list
][scan8
[4 ]] =
764 h
->ref_cache
[list
][scan8
[12]] = PART_NOT_AVAILABLE
;
765 *(uint32_t*)h
->mv_cache
[list
][scan8
[5 ]+1]=
766 *(uint32_t*)h
->mv_cache
[list
][scan8
[7 ]+1]=
767 *(uint32_t*)h
->mv_cache
[list
][scan8
[13]+1]= //FIXME remove past 3 (init somewhere else)
768 *(uint32_t*)h
->mv_cache
[list
][scan8
[4 ]]=
769 *(uint32_t*)h
->mv_cache
[list
][scan8
[12]]= 0;
772 /* XXX beurk, Load mvd */
773 if(IS_INTER(topleft_type
)){
774 const int b_xy
= h
->mb2b_xy
[topleft_xy
] + 3 + 3*h
->b_stride
;
775 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] - 1 - 1*8]= *(uint32_t*)h
->mvd_table
[list
][b_xy
];
777 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] - 1 - 1*8]= 0;
780 if(IS_INTER(top_type
)){
781 const int b_xy
= h
->mb2b_xy
[top_xy
] + 3*h
->b_stride
;
782 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] + 0 - 1*8]= *(uint32_t*)h
->mvd_table
[list
][b_xy
+ 0];
783 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] + 1 - 1*8]= *(uint32_t*)h
->mvd_table
[list
][b_xy
+ 1];
784 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] + 2 - 1*8]= *(uint32_t*)h
->mvd_table
[list
][b_xy
+ 2];
785 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] + 3 - 1*8]= *(uint32_t*)h
->mvd_table
[list
][b_xy
+ 3];
787 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] + 0 - 1*8]=
788 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] + 1 - 1*8]=
789 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] + 2 - 1*8]=
790 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] + 3 - 1*8]= 0;
792 if(IS_INTER(left_type
[0])){
793 const int b_xy
= h
->mb2b_xy
[left_xy
[0]] + 3;
794 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] - 1 + 0*8]= *(uint32_t*)h
->mvd_table
[list
][b_xy
+ h
->b_stride
*left_block
[0]];
795 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] - 1 + 1*8]= *(uint32_t*)h
->mvd_table
[list
][b_xy
+ h
->b_stride
*left_block
[1]];
797 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] - 1 + 0*8]=
798 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] - 1 + 1*8]= 0;
800 if(IS_INTER(left_type
[1])){
801 const int b_xy
= h
->mb2b_xy
[left_xy
[1]] + 3;
802 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] - 1 + 2*8]= *(uint32_t*)h
->mvd_table
[list
][b_xy
+ h
->b_stride
*left_block
[2]];
803 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] - 1 + 3*8]= *(uint32_t*)h
->mvd_table
[list
][b_xy
+ h
->b_stride
*left_block
[3]];
805 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] - 1 + 2*8]=
806 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] - 1 + 3*8]= 0;
808 *(uint32_t*)h
->mvd_cache
[list
][scan8
[5 ]+1]=
809 *(uint32_t*)h
->mvd_cache
[list
][scan8
[7 ]+1]=
810 *(uint32_t*)h
->mvd_cache
[list
][scan8
[13]+1]= //FIXME remove past 3 (init somewhere else)
811 *(uint32_t*)h
->mvd_cache
[list
][scan8
[4 ]]=
812 *(uint32_t*)h
->mvd_cache
[list
][scan8
[12]]= 0;
814 if(h
->slice_type
== B_TYPE
){
815 fill_rectangle(&h
->direct_cache
[scan8
[0]], 4, 4, 8, 0, 1);
817 if(IS_DIRECT(top_type
)){
818 *(uint32_t*)&h
->direct_cache
[scan8
[0] - 1*8]= 0x01010101;
819 }else if(IS_8X8(top_type
)){
820 int b8_xy
= h
->mb2b8_xy
[top_xy
] + h
->b8_stride
;
821 h
->direct_cache
[scan8
[0] + 0 - 1*8]= h
->direct_table
[b8_xy
];
822 h
->direct_cache
[scan8
[0] + 2 - 1*8]= h
->direct_table
[b8_xy
+ 1];
824 *(uint32_t*)&h
->direct_cache
[scan8
[0] - 1*8]= 0;
828 if(IS_DIRECT(left_type
[0])){
829 h
->direct_cache
[scan8
[0] - 1 + 0*8]=
830 h
->direct_cache
[scan8
[0] - 1 + 2*8]= 1;
831 }else if(IS_8X8(left_type
[0])){
832 int b8_xy
= h
->mb2b8_xy
[left_xy
[0]] + 1;
833 h
->direct_cache
[scan8
[0] - 1 + 0*8]= h
->direct_table
[b8_xy
];
834 h
->direct_cache
[scan8
[0] - 1 + 2*8]= h
->direct_table
[b8_xy
+ h
->b8_stride
];
836 h
->direct_cache
[scan8
[0] - 1 + 0*8]=
837 h
->direct_cache
[scan8
[0] - 1 + 2*8]= 0;
846 static inline void write_back_intra_pred_mode(H264Context
*h
){
847 MpegEncContext
* const s
= &h
->s
;
848 const int mb_xy
= s
->mb_x
+ s
->mb_y
*s
->mb_stride
;
850 h
->intra4x4_pred_mode
[mb_xy
][0]= h
->intra4x4_pred_mode_cache
[7+8*1];
851 h
->intra4x4_pred_mode
[mb_xy
][1]= h
->intra4x4_pred_mode_cache
[7+8*2];
852 h
->intra4x4_pred_mode
[mb_xy
][2]= h
->intra4x4_pred_mode_cache
[7+8*3];
853 h
->intra4x4_pred_mode
[mb_xy
][3]= h
->intra4x4_pred_mode_cache
[7+8*4];
854 h
->intra4x4_pred_mode
[mb_xy
][4]= h
->intra4x4_pred_mode_cache
[4+8*4];
855 h
->intra4x4_pred_mode
[mb_xy
][5]= h
->intra4x4_pred_mode_cache
[5+8*4];
856 h
->intra4x4_pred_mode
[mb_xy
][6]= h
->intra4x4_pred_mode_cache
[6+8*4];
860 * checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks.
862 static inline int check_intra4x4_pred_mode(H264Context
*h
){
863 MpegEncContext
* const s
= &h
->s
;
864 static const int8_t top
[12]= {-1, 0,LEFT_DC_PRED
,-1,-1,-1,-1,-1, 0};
865 static const int8_t left
[12]= { 0,-1, TOP_DC_PRED
, 0,-1,-1,-1, 0,-1,DC_128_PRED
};
868 if(!(h
->top_samples_available
&0x8000)){
870 int status
= top
[ h
->intra4x4_pred_mode_cache
[scan8
[0] + i
] ];
872 av_log(h
->s
.avctx
, AV_LOG_ERROR
, "top block unavailable for requested intra4x4 mode %d at %d %d\n", status
, s
->mb_x
, s
->mb_y
);
875 h
->intra4x4_pred_mode_cache
[scan8
[0] + i
]= status
;
880 if(!(h
->left_samples_available
&0x8000)){
882 int status
= left
[ h
->intra4x4_pred_mode_cache
[scan8
[0] + 8*i
] ];
884 av_log(h
->s
.avctx
, AV_LOG_ERROR
, "left block unavailable for requested intra4x4 mode %d at %d %d\n", status
, s
->mb_x
, s
->mb_y
);
887 h
->intra4x4_pred_mode_cache
[scan8
[0] + 8*i
]= status
;
893 } //FIXME cleanup like next
896 * checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks.
898 static inline int check_intra_pred_mode(H264Context
*h
, int mode
){
899 MpegEncContext
* const s
= &h
->s
;
900 static const int8_t top
[7]= {LEFT_DC_PRED8x8
, 1,-1,-1};
901 static const int8_t left
[7]= { TOP_DC_PRED8x8
,-1, 2,-1,DC_128_PRED8x8
};
903 if(mode
< 0 || mode
> 6) {
904 av_log(h
->s
.avctx
, AV_LOG_ERROR
, "out of range intra chroma pred mode at %d %d\n", s
->mb_x
, s
->mb_y
);
908 if(!(h
->top_samples_available
&0x8000)){
911 av_log(h
->s
.avctx
, AV_LOG_ERROR
, "top block unavailable for requested intra mode at %d %d\n", s
->mb_x
, s
->mb_y
);
916 if(!(h
->left_samples_available
&0x8000)){
919 av_log(h
->s
.avctx
, AV_LOG_ERROR
, "left block unavailable for requested intra mode at %d %d\n", s
->mb_x
, s
->mb_y
);
928 * gets the predicted intra4x4 prediction mode.
930 static inline int pred_intra_mode(H264Context
*h
, int n
){
931 const int index8
= scan8
[n
];
932 const int left
= h
->intra4x4_pred_mode_cache
[index8
- 1];
933 const int top
= h
->intra4x4_pred_mode_cache
[index8
- 8];
934 const int min
= FFMIN(left
, top
);
936 tprintf("mode:%d %d min:%d\n", left
,top
, min
);
938 if(min
<0) return DC_PRED
;
942 static inline void write_back_non_zero_count(H264Context
*h
){
943 MpegEncContext
* const s
= &h
->s
;
944 const int mb_xy
= s
->mb_x
+ s
->mb_y
*s
->mb_stride
;
946 h
->non_zero_count
[mb_xy
][0]= h
->non_zero_count_cache
[7+8*1];
947 h
->non_zero_count
[mb_xy
][1]= h
->non_zero_count_cache
[7+8*2];
948 h
->non_zero_count
[mb_xy
][2]= h
->non_zero_count_cache
[7+8*3];
949 h
->non_zero_count
[mb_xy
][3]= h
->non_zero_count_cache
[7+8*4];
950 h
->non_zero_count
[mb_xy
][4]= h
->non_zero_count_cache
[4+8*4];
951 h
->non_zero_count
[mb_xy
][5]= h
->non_zero_count_cache
[5+8*4];
952 h
->non_zero_count
[mb_xy
][6]= h
->non_zero_count_cache
[6+8*4];
954 h
->non_zero_count
[mb_xy
][9]= h
->non_zero_count_cache
[1+8*2];
955 h
->non_zero_count
[mb_xy
][8]= h
->non_zero_count_cache
[2+8*2];
956 h
->non_zero_count
[mb_xy
][7]= h
->non_zero_count_cache
[2+8*1];
958 h
->non_zero_count
[mb_xy
][12]=h
->non_zero_count_cache
[1+8*5];
959 h
->non_zero_count
[mb_xy
][11]=h
->non_zero_count_cache
[2+8*5];
960 h
->non_zero_count
[mb_xy
][10]=h
->non_zero_count_cache
[2+8*4];
964 * gets the predicted number of non zero coefficients.
965 * @param n block index
967 static inline int pred_non_zero_count(H264Context
*h
, int n
){
968 const int index8
= scan8
[n
];
969 const int left
= h
->non_zero_count_cache
[index8
- 1];
970 const int top
= h
->non_zero_count_cache
[index8
- 8];
973 if(i
<64) i
= (i
+1)>>1;
975 tprintf("pred_nnz L%X T%X n%d s%d P%X\n", left
, top
, n
, scan8
[n
], i
&31);
980 static inline int fetch_diagonal_mv(H264Context
*h
, const int16_t **C
, int i
, int list
, int part_width
){
981 const int topright_ref
= h
->ref_cache
[list
][ i
- 8 + part_width
];
983 if(topright_ref
!= PART_NOT_AVAILABLE
){
984 *C
= h
->mv_cache
[list
][ i
- 8 + part_width
];
987 tprintf("topright MV not available\n");
989 *C
= h
->mv_cache
[list
][ i
- 8 - 1 ];
990 return h
->ref_cache
[list
][ i
- 8 - 1 ];
995 * gets the predicted MV.
996 * @param n the block index
997 * @param part_width the width of the partition (4, 8,16) -> (1, 2, 4)
998 * @param mx the x component of the predicted motion vector
999 * @param my the y component of the predicted motion vector
1001 static inline void pred_motion(H264Context
* const h
, int n
, int part_width
, int list
, int ref
, int * const mx
, int * const my
){
1002 const int index8
= scan8
[n
];
1003 const int top_ref
= h
->ref_cache
[list
][ index8
- 8 ];
1004 const int left_ref
= h
->ref_cache
[list
][ index8
- 1 ];
1005 const int16_t * const A
= h
->mv_cache
[list
][ index8
- 1 ];
1006 const int16_t * const B
= h
->mv_cache
[list
][ index8
- 8 ];
1008 int diagonal_ref
, match_count
;
1010 assert(part_width
==1 || part_width
==2 || part_width
==4);
1020 diagonal_ref
= fetch_diagonal_mv(h
, &C
, index8
, list
, part_width
);
1021 match_count
= (diagonal_ref
==ref
) + (top_ref
==ref
) + (left_ref
==ref
);
1022 tprintf("pred_motion match_count=%d\n", match_count
);
1023 if(match_count
> 1){ //most common
1024 *mx
= mid_pred(A
[0], B
[0], C
[0]);
1025 *my
= mid_pred(A
[1], B
[1], C
[1]);
1026 }else if(match_count
==1){
1030 }else if(top_ref
==ref
){
1038 if(top_ref
== PART_NOT_AVAILABLE
&& diagonal_ref
== PART_NOT_AVAILABLE
&& left_ref
!= PART_NOT_AVAILABLE
){
1042 *mx
= mid_pred(A
[0], B
[0], C
[0]);
1043 *my
= mid_pred(A
[1], B
[1], C
[1]);
1047 tprintf("pred_motion (%2d %2d %2d) (%2d %2d %2d) (%2d %2d %2d) -> (%2d %2d %2d) at %2d %2d %d list %d\n", top_ref
, B
[0], B
[1], diagonal_ref
, C
[0], C
[1], left_ref
, A
[0], A
[1], ref
, *mx
, *my
, h
->s
.mb_x
, h
->s
.mb_y
, n
, list
);
1051 * gets the directionally predicted 16x8 MV.
1052 * @param n the block index
1053 * @param mx the x component of the predicted motion vector
1054 * @param my the y component of the predicted motion vector
1056 static inline void pred_16x8_motion(H264Context
* const h
, int n
, int list
, int ref
, int * const mx
, int * const my
){
1058 const int top_ref
= h
->ref_cache
[list
][ scan8
[0] - 8 ];
1059 const int16_t * const B
= h
->mv_cache
[list
][ scan8
[0] - 8 ];
1061 tprintf("pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n", top_ref
, B
[0], B
[1], h
->s
.mb_x
, h
->s
.mb_y
, n
, list
);
1069 const int left_ref
= h
->ref_cache
[list
][ scan8
[8] - 1 ];
1070 const int16_t * const A
= h
->mv_cache
[list
][ scan8
[8] - 1 ];
1072 tprintf("pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n", left_ref
, A
[0], A
[1], h
->s
.mb_x
, h
->s
.mb_y
, n
, list
);
1074 if(left_ref
== ref
){
1082 pred_motion(h
, n
, 4, list
, ref
, mx
, my
);
1086 * gets the directionally predicted 8x16 MV.
1087 * @param n the block index
1088 * @param mx the x component of the predicted motion vector
1089 * @param my the y component of the predicted motion vector
1091 static inline void pred_8x16_motion(H264Context
* const h
, int n
, int list
, int ref
, int * const mx
, int * const my
){
1093 const int left_ref
= h
->ref_cache
[list
][ scan8
[0] - 1 ];
1094 const int16_t * const A
= h
->mv_cache
[list
][ scan8
[0] - 1 ];
1096 tprintf("pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n", left_ref
, A
[0], A
[1], h
->s
.mb_x
, h
->s
.mb_y
, n
, list
);
1098 if(left_ref
== ref
){
1107 diagonal_ref
= fetch_diagonal_mv(h
, &C
, scan8
[4], list
, 2);
1109 tprintf("pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n", diagonal_ref
, C
[0], C
[1], h
->s
.mb_x
, h
->s
.mb_y
, n
, list
);
1111 if(diagonal_ref
== ref
){
1119 pred_motion(h
, n
, 2, list
, ref
, mx
, my
);
1122 static inline void pred_pskip_motion(H264Context
* const h
, int * const mx
, int * const my
){
1123 const int top_ref
= h
->ref_cache
[0][ scan8
[0] - 8 ];
1124 const int left_ref
= h
->ref_cache
[0][ scan8
[0] - 1 ];
1126 tprintf("pred_pskip: (%d) (%d) at %2d %2d\n", top_ref
, left_ref
, h
->s
.mb_x
, h
->s
.mb_y
);
1128 if(top_ref
== PART_NOT_AVAILABLE
|| left_ref
== PART_NOT_AVAILABLE
1129 || (top_ref
== 0 && *(uint32_t*)h
->mv_cache
[0][ scan8
[0] - 8 ] == 0)
1130 || (left_ref
== 0 && *(uint32_t*)h
->mv_cache
[0][ scan8
[0] - 1 ] == 0)){
1136 pred_motion(h
, 0, 4, 0, 0, mx
, my
);
1141 static inline void direct_dist_scale_factor(H264Context
* const h
){
1142 const int poc
= h
->s
.current_picture_ptr
->poc
;
1143 const int poc1
= h
->ref_list
[1][0].poc
;
1145 for(i
=0; i
<h
->ref_count
[0]; i
++){
1146 int poc0
= h
->ref_list
[0][i
].poc
;
1147 int td
= clip(poc1
- poc0
, -128, 127);
1148 if(td
== 0 /* FIXME || pic0 is a long-term ref */){
1149 h
->dist_scale_factor
[i
] = 256;
1151 int tb
= clip(poc
- poc0
, -128, 127);
1152 int tx
= (16384 + (ABS(td
) >> 1)) / td
;
1153 h
->dist_scale_factor
[i
] = clip((tb
*tx
+ 32) >> 6, -1024, 1023);
1157 static inline void direct_ref_list_init(H264Context
* const h
){
1158 MpegEncContext
* const s
= &h
->s
;
1159 Picture
* const ref1
= &h
->ref_list
[1][0];
1160 Picture
* const cur
= s
->current_picture_ptr
;
1162 if(cur
->pict_type
== I_TYPE
)
1163 cur
->ref_count
[0] = 0;
1164 if(cur
->pict_type
!= B_TYPE
)
1165 cur
->ref_count
[1] = 0;
1166 for(list
=0; list
<2; list
++){
1167 cur
->ref_count
[list
] = h
->ref_count
[list
];
1168 for(j
=0; j
<h
->ref_count
[list
]; j
++)
1169 cur
->ref_poc
[list
][j
] = h
->ref_list
[list
][j
].poc
;
1171 if(cur
->pict_type
!= B_TYPE
|| h
->direct_spatial_mv_pred
)
1173 for(list
=0; list
<2; list
++){
1174 for(i
=0; i
<ref1
->ref_count
[list
]; i
++){
1175 const int poc
= ref1
->ref_poc
[list
][i
];
1176 h
->map_col_to_list0
[list
][i
] = PART_NOT_AVAILABLE
;
1177 for(j
=0; j
<h
->ref_count
[list
]; j
++)
1178 if(h
->ref_list
[list
][j
].poc
== poc
){
1179 h
->map_col_to_list0
[list
][i
] = j
;
1186 static inline void pred_direct_motion(H264Context
* const h
, int *mb_type
){
1187 MpegEncContext
* const s
= &h
->s
;
1188 const int mb_xy
= s
->mb_x
+ s
->mb_y
*s
->mb_stride
;
1189 const int b8_xy
= 2*s
->mb_x
+ 2*s
->mb_y
*h
->b8_stride
;
1190 const int b4_xy
= 4*s
->mb_x
+ 4*s
->mb_y
*h
->b_stride
;
1191 const int mb_type_col
= h
->ref_list
[1][0].mb_type
[mb_xy
];
1192 const int16_t (*l1mv0
)[2] = (const int16_t (*)[2]) &h
->ref_list
[1][0].motion_val
[0][b4_xy
];
1193 const int8_t *l1ref0
= &h
->ref_list
[1][0].ref_index
[0][b8_xy
];
1194 const int8_t *l1ref1
= &h
->ref_list
[1][0].ref_index
[1][b8_xy
];
1195 const int is_b8x8
= IS_8X8(*mb_type
);
1199 if(IS_8X8(mb_type_col
) && !h
->sps
.direct_8x8_inference_flag
){
1200 /* FIXME save sub mb types from previous frames (or derive from MVs)
1201 * so we know exactly what block size to use */
1202 sub_mb_type
= MB_TYPE_8x8
|MB_TYPE_P0L0
|MB_TYPE_P0L1
|MB_TYPE_DIRECT2
; /* B_SUB_4x4 */
1203 *mb_type
= MB_TYPE_8x8
|MB_TYPE_L0L1
;
1204 }else if(!is_b8x8
&& (IS_16X16(mb_type_col
) || IS_INTRA(mb_type_col
))){
1205 sub_mb_type
= MB_TYPE_16x16
|MB_TYPE_P0L0
|MB_TYPE_P0L1
|MB_TYPE_DIRECT2
; /* B_SUB_8x8 */
1206 *mb_type
= MB_TYPE_16x16
|MB_TYPE_P0L0
|MB_TYPE_P0L1
|MB_TYPE_DIRECT2
; /* B_16x16 */
1208 sub_mb_type
= MB_TYPE_16x16
|MB_TYPE_P0L0
|MB_TYPE_P0L1
|MB_TYPE_DIRECT2
; /* B_SUB_8x8 */
1209 *mb_type
= MB_TYPE_8x8
|MB_TYPE_L0L1
;
1212 *mb_type
|= MB_TYPE_DIRECT2
;
1214 tprintf("mb_type = %08x, sub_mb_type = %08x, is_b8x8 = %d, mb_type_col = %08x\n", *mb_type
, sub_mb_type
, is_b8x8
, mb_type_col
);
1216 if(h
->direct_spatial_mv_pred
){
1221 /* ref = min(neighbors) */
1222 for(list
=0; list
<2; list
++){
1223 int refa
= h
->ref_cache
[list
][scan8
[0] - 1];
1224 int refb
= h
->ref_cache
[list
][scan8
[0] - 8];
1225 int refc
= h
->ref_cache
[list
][scan8
[0] - 8 + 4];
1227 refc
= h
->ref_cache
[list
][scan8
[0] - 8 - 1];
1229 if(ref
[list
] < 0 || (refb
< ref
[list
] && refb
>= 0))
1231 if(ref
[list
] < 0 || (refc
< ref
[list
] && refc
>= 0))
1237 if(ref
[0] < 0 && ref
[1] < 0){
1238 ref
[0] = ref
[1] = 0;
1239 mv
[0][0] = mv
[0][1] =
1240 mv
[1][0] = mv
[1][1] = 0;
1242 for(list
=0; list
<2; list
++){
1244 pred_motion(h
, 0, 4, list
, ref
[list
], &mv
[list
][0], &mv
[list
][1]);
1246 mv
[list
][0] = mv
[list
][1] = 0;
1251 *mb_type
&= ~MB_TYPE_P0L1
;
1252 sub_mb_type
&= ~MB_TYPE_P0L1
;
1253 }else if(ref
[0] < 0){
1254 *mb_type
&= ~MB_TYPE_P0L0
;
1255 sub_mb_type
&= ~MB_TYPE_P0L0
;
1258 if(IS_16X16(*mb_type
)){
1259 fill_rectangle(&h
->ref_cache
[0][scan8
[0]], 4, 4, 8, ref
[0], 1);
1260 fill_rectangle(&h
->ref_cache
[1][scan8
[0]], 4, 4, 8, ref
[1], 1);
1261 if(!IS_INTRA(mb_type_col
) && l1ref0
[0] == 0 &&
1262 ABS(l1mv0
[0][0]) <= 1 && ABS(l1mv0
[0][1]) <= 1){
1264 fill_rectangle(&h
->mv_cache
[0][scan8
[0]], 4, 4, 8, pack16to32(mv
[0][0],mv
[0][1]), 4);
1266 fill_rectangle(&h
->mv_cache
[0][scan8
[0]], 4, 4, 8, 0, 4);
1268 fill_rectangle(&h
->mv_cache
[1][scan8
[0]], 4, 4, 8, pack16to32(mv
[1][0],mv
[1][1]), 4);
1270 fill_rectangle(&h
->mv_cache
[1][scan8
[0]], 4, 4, 8, 0, 4);
1272 fill_rectangle(&h
->mv_cache
[0][scan8
[0]], 4, 4, 8, pack16to32(mv
[0][0],mv
[0][1]), 4);
1273 fill_rectangle(&h
->mv_cache
[1][scan8
[0]], 4, 4, 8, pack16to32(mv
[1][0],mv
[1][1]), 4);
1276 for(i8
=0; i8
<4; i8
++){
1277 const int x8
= i8
&1;
1278 const int y8
= i8
>>1;
1280 if(is_b8x8
&& !IS_DIRECT(h
->sub_mb_type
[i8
]))
1282 h
->sub_mb_type
[i8
] = sub_mb_type
;
1284 fill_rectangle(&h
->mv_cache
[0][scan8
[i8
*4]], 2, 2, 8, pack16to32(mv
[0][0],mv
[0][1]), 4);
1285 fill_rectangle(&h
->mv_cache
[1][scan8
[i8
*4]], 2, 2, 8, pack16to32(mv
[1][0],mv
[1][1]), 4);
1286 fill_rectangle(&h
->ref_cache
[0][scan8
[i8
*4]], 2, 2, 8, ref
[0], 1);
1287 fill_rectangle(&h
->ref_cache
[1][scan8
[i8
*4]], 2, 2, 8, ref
[1], 1);
1290 if(!IS_INTRA(mb_type_col
) && l1ref0
[x8
+ y8
*h
->b8_stride
] == 0){
1291 for(i4
=0; i4
<4; i4
++){
1292 const int16_t *mv_col
= l1mv0
[x8
*2 + (i4
&1) + (y8
*2 + (i4
>>1))*h
->b_stride
];
1293 if(ABS(mv_col
[0]) <= 1 && ABS(mv_col
[1]) <= 1){
1295 *(uint32_t*)h
->mv_cache
[0][scan8
[i8
*4+i4
]] = 0;
1297 *(uint32_t*)h
->mv_cache
[1][scan8
[i8
*4+i4
]] = 0;
1303 }else{ /* direct temporal mv pred */
1304 if(IS_16X16(*mb_type
)){
1305 fill_rectangle(&h
->ref_cache
[1][scan8
[0]], 4, 4, 8, 0, 1);
1306 if(IS_INTRA(mb_type_col
)){
1307 fill_rectangle(&h
->ref_cache
[0][scan8
[0]], 4, 4, 8, 0, 1);
1308 fill_rectangle(&h
-> mv_cache
[0][scan8
[0]], 4, 4, 8, 0, 4);
1309 fill_rectangle(&h
-> mv_cache
[1][scan8
[0]], 4, 4, 8, 0, 4);
1311 const int ref0
= l1ref0
[0] >= 0 ? h
->map_col_to_list0
[0][l1ref0
[0]]
1312 : h
->map_col_to_list0
[1][l1ref1
[0]];
1313 const int dist_scale_factor
= h
->dist_scale_factor
[ref0
];
1314 const int16_t *mv_col
= l1mv0
[0];
1316 mv_l0
[0] = (dist_scale_factor
* mv_col
[0] + 128) >> 8;
1317 mv_l0
[1] = (dist_scale_factor
* mv_col
[1] + 128) >> 8;
1318 fill_rectangle(&h
->ref_cache
[0][scan8
[0]], 4, 4, 8, ref0
, 1);
1319 fill_rectangle(&h
-> mv_cache
[0][scan8
[0]], 4, 4, 8, pack16to32(mv_l0
[0],mv_l0
[1]), 4);
1320 fill_rectangle(&h
-> mv_cache
[1][scan8
[0]], 4, 4, 8, pack16to32(mv_l0
[0]-mv_col
[0],mv_l0
[1]-mv_col
[1]), 4);
1323 for(i8
=0; i8
<4; i8
++){
1324 const int x8
= i8
&1;
1325 const int y8
= i8
>>1;
1326 int ref0
, dist_scale_factor
;
1328 if(is_b8x8
&& !IS_DIRECT(h
->sub_mb_type
[i8
]))
1330 h
->sub_mb_type
[i8
] = sub_mb_type
;
1331 if(IS_INTRA(mb_type_col
)){
1332 fill_rectangle(&h
->ref_cache
[0][scan8
[i8
*4]], 2, 2, 8, 0, 1);
1333 fill_rectangle(&h
->ref_cache
[1][scan8
[i8
*4]], 2, 2, 8, 0, 1);
1334 fill_rectangle(&h
-> mv_cache
[0][scan8
[i8
*4]], 2, 2, 8, 0, 4);
1335 fill_rectangle(&h
-> mv_cache
[1][scan8
[i8
*4]], 2, 2, 8, 0, 4);
1339 ref0
= l1ref0
[x8
+ y8
*h
->b8_stride
];
1341 ref0
= h
->map_col_to_list0
[0][ref0
];
1343 ref0
= h
->map_col_to_list0
[1][l1ref1
[x8
+ y8
*h
->b8_stride
]];
1344 dist_scale_factor
= h
->dist_scale_factor
[ref0
];
1346 fill_rectangle(&h
->ref_cache
[0][scan8
[i8
*4]], 2, 2, 8, ref0
, 1);
1347 fill_rectangle(&h
->ref_cache
[1][scan8
[i8
*4]], 2, 2, 8, 0, 1);
1348 for(i4
=0; i4
<4; i4
++){
1349 const int16_t *mv_col
= l1mv0
[x8
*2 + (i4
&1) + (y8
*2 + (i4
>>1))*h
->b_stride
];
1350 int16_t *mv_l0
= h
->mv_cache
[0][scan8
[i8
*4+i4
]];
1351 mv_l0
[0] = (dist_scale_factor
* mv_col
[0] + 128) >> 8;
1352 mv_l0
[1] = (dist_scale_factor
* mv_col
[1] + 128) >> 8;
1353 *(uint32_t*)h
->mv_cache
[1][scan8
[i8
*4+i4
]] =
1354 pack16to32(mv_l0
[0]-mv_col
[0],mv_l0
[1]-mv_col
[1]);
1361 static inline void write_back_motion(H264Context
*h
, int mb_type
){
1362 MpegEncContext
* const s
= &h
->s
;
1363 const int b_xy
= 4*s
->mb_x
+ 4*s
->mb_y
*h
->b_stride
;
1364 const int b8_xy
= 2*s
->mb_x
+ 2*s
->mb_y
*h
->b8_stride
;
1367 for(list
=0; list
<2; list
++){
1369 if(!USES_LIST(mb_type
, list
)){
1370 if(1){ //FIXME skip or never read if mb_type doesn't use it
1372 *(uint64_t*)s
->current_picture
.motion_val
[list
][b_xy
+ 0 + y
*h
->b_stride
]=
1373 *(uint64_t*)s
->current_picture
.motion_val
[list
][b_xy
+ 2 + y
*h
->b_stride
]= 0;
1375 if( h
->pps
.cabac
) {
1376 /* FIXME needed ? */
1378 *(uint64_t*)h
->mvd_table
[list
][b_xy
+ 0 + y
*h
->b_stride
]=
1379 *(uint64_t*)h
->mvd_table
[list
][b_xy
+ 2 + y
*h
->b_stride
]= 0;
1383 s
->current_picture
.ref_index
[list
][b8_xy
+ 0 + y
*h
->b8_stride
]=
1384 s
->current_picture
.ref_index
[list
][b8_xy
+ 1 + y
*h
->b8_stride
]= LIST_NOT_USED
;
1391 *(uint64_t*)s
->current_picture
.motion_val
[list
][b_xy
+ 0 + y
*h
->b_stride
]= *(uint64_t*)h
->mv_cache
[list
][scan8
[0]+0 + 8*y
];
1392 *(uint64_t*)s
->current_picture
.motion_val
[list
][b_xy
+ 2 + y
*h
->b_stride
]= *(uint64_t*)h
->mv_cache
[list
][scan8
[0]+2 + 8*y
];
1394 if( h
->pps
.cabac
) {
1396 *(uint64_t*)h
->mvd_table
[list
][b_xy
+ 0 + y
*h
->b_stride
]= *(uint64_t*)h
->mvd_cache
[list
][scan8
[0]+0 + 8*y
];
1397 *(uint64_t*)h
->mvd_table
[list
][b_xy
+ 2 + y
*h
->b_stride
]= *(uint64_t*)h
->mvd_cache
[list
][scan8
[0]+2 + 8*y
];
1401 s
->current_picture
.ref_index
[list
][b8_xy
+ 0 + y
*h
->b8_stride
]= h
->ref_cache
[list
][scan8
[0]+0 + 16*y
];
1402 s
->current_picture
.ref_index
[list
][b8_xy
+ 1 + y
*h
->b8_stride
]= h
->ref_cache
[list
][scan8
[0]+2 + 16*y
];
1406 if(h
->slice_type
== B_TYPE
&& h
->pps
.cabac
){
1407 if(IS_8X8(mb_type
)){
1408 h
->direct_table
[b8_xy
+1+0*h
->b8_stride
] = IS_DIRECT(h
->sub_mb_type
[1]) ?
1 : 0;
1409 h
->direct_table
[b8_xy
+0+1*h
->b8_stride
] = IS_DIRECT(h
->sub_mb_type
[2]) ?
1 : 0;
1410 h
->direct_table
[b8_xy
+1+1*h
->b8_stride
] = IS_DIRECT(h
->sub_mb_type
[3]) ?
1 : 0;
1416 * Decodes a network abstraction layer unit.
1417 * @param consumed is the number of bytes used as input
1418 * @param length is the length of the array
1419 * @param dst_length is the number of decoded bytes FIXME here or a decode rbsp tailing?
1420 * @returns decoded bytes, might be src+1 if no escapes
1422 static uint8_t *decode_nal(H264Context
*h
, uint8_t *src
, int *dst_length
, int *consumed
, int length
){
1426 // src[0]&0x80; //forbidden bit
1427 h
->nal_ref_idc
= src
[0]>>5;
1428 h
->nal_unit_type
= src
[0]&0x1F;
1432 for(i
=0; i
<length
; i
++)
1433 printf("%2X ", src
[i
]);
1435 for(i
=0; i
+1<length
; i
+=2){
1436 if(src
[i
]) continue;
1437 if(i
>0 && src
[i
-1]==0) i
--;
1438 if(i
+2<length
&& src
[i
+1]==0 && src
[i
+2]<=3){
1440 /* startcode, so we must be past the end */
1447 if(i
>=length
-1){ //no escaped 0
1448 *dst_length
= length
;
1449 *consumed
= length
+1; //+1 for the header
1453 h
->rbsp_buffer
= av_fast_realloc(h
->rbsp_buffer
, &h
->rbsp_buffer_size
, length
);
1454 dst
= h
->rbsp_buffer
;
1456 //printf("decoding esc\n");
1459 //remove escapes (very rare 1:2^22)
1460 if(si
+2<length
&& src
[si
]==0 && src
[si
+1]==0 && src
[si
+2]<=3){
1461 if(src
[si
+2]==3){ //escape
1466 }else //next start code
1470 dst
[di
++]= src
[si
++];
1474 *consumed
= si
+ 1;//+1 for the header
1475 //FIXME store exact number of bits in the getbitcontext (its needed for decoding)
1481 * @param src the data which should be escaped
1482 * @param dst the target buffer, dst+1 == src is allowed as a special case
1483 * @param length the length of the src data
1484 * @param dst_length the length of the dst array
1485 * @returns length of escaped data in bytes or -1 if an error occured
1487 static int encode_nal(H264Context
*h
, uint8_t *dst
, uint8_t *src
, int length
, int dst_length
){
1488 int i
, escape_count
, si
, di
;
1492 assert(dst_length
>0);
1494 dst
[0]= (h
->nal_ref_idc
<<5) + h
->nal_unit_type
;
1496 if(length
==0) return 1;
1499 for(i
=0; i
<length
; i
+=2){
1500 if(src
[i
]) continue;
1501 if(i
>0 && src
[i
-1]==0)
1503 if(i
+2<length
&& src
[i
+1]==0 && src
[i
+2]<=3){
1509 if(escape_count
==0){
1511 memcpy(dst
+1, src
, length
);
1515 if(length
+ escape_count
+ 1> dst_length
)
1518 //this should be damn rare (hopefully)
1520 h
->rbsp_buffer
= av_fast_realloc(h
->rbsp_buffer
, &h
->rbsp_buffer_size
, length
+ escape_count
);
1521 temp
= h
->rbsp_buffer
;
1522 //printf("encoding esc\n");
1527 if(si
+2<length
&& src
[si
]==0 && src
[si
+1]==0 && src
[si
+2]<=3){
1528 temp
[di
++]= 0; si
++;
1529 temp
[di
++]= 0; si
++;
1531 temp
[di
++]= src
[si
++];
1534 temp
[di
++]= src
[si
++];
1536 memcpy(dst
+1, temp
, length
+escape_count
);
1538 assert(di
== length
+escape_count
);
1544 * write 1,10,100,1000,... for alignment, yes its exactly inverse to mpeg4
1546 static void encode_rbsp_trailing(PutBitContext
*pb
){
1549 length
= (-put_bits_count(pb
))&7;
1550 if(length
) put_bits(pb
, length
, 0);
1555 * identifies the exact end of the bitstream
1556 * @return the length of the trailing, or 0 if damaged
1558 static int decode_rbsp_trailing(uint8_t *src
){
1562 tprintf("rbsp trailing %X\n", v
);
1572 * idct transforms the 16 dc values and dequantizes them.
1573 * @param qp quantization parameter
1575 static void h264_luma_dc_dequant_idct_c(DCTELEM
*block
, int qp
){
1576 const int qmul
= dequant_coeff
[qp
][0];
1579 int temp
[16]; //FIXME check if this is a good idea
1580 static const int x_offset
[4]={0, 1*stride
, 4* stride
, 5*stride
};
1581 static const int y_offset
[4]={0, 2*stride
, 8* stride
, 10*stride
};
1583 //memset(block, 64, 2*256);
1586 const int offset
= y_offset
[i
];
1587 const int z0
= block
[offset
+stride
*0] + block
[offset
+stride
*4];
1588 const int z1
= block
[offset
+stride
*0] - block
[offset
+stride
*4];
1589 const int z2
= block
[offset
+stride
*1] - block
[offset
+stride
*5];
1590 const int z3
= block
[offset
+stride
*1] + block
[offset
+stride
*5];
1599 const int offset
= x_offset
[i
];
1600 const int z0
= temp
[4*0+i
] + temp
[4*2+i
];
1601 const int z1
= temp
[4*0+i
] - temp
[4*2+i
];
1602 const int z2
= temp
[4*1+i
] - temp
[4*3+i
];
1603 const int z3
= temp
[4*1+i
] + temp
[4*3+i
];
1605 block
[stride
*0 +offset
]= ((z0
+ z3
)*qmul
+ 2)>>2; //FIXME think about merging this into decode_resdual
1606 block
[stride
*2 +offset
]= ((z1
+ z2
)*qmul
+ 2)>>2;
1607 block
[stride
*8 +offset
]= ((z1
- z2
)*qmul
+ 2)>>2;
1608 block
[stride
*10+offset
]= ((z0
- z3
)*qmul
+ 2)>>2;
1614 * dct transforms the 16 dc values.
1615 * @param qp quantization parameter ??? FIXME
1617 static void h264_luma_dc_dct_c(DCTELEM
*block
/*, int qp*/){
1618 // const int qmul= dequant_coeff[qp][0];
1620 int temp
[16]; //FIXME check if this is a good idea
1621 static const int x_offset
[4]={0, 1*stride
, 4* stride
, 5*stride
};
1622 static const int y_offset
[4]={0, 2*stride
, 8* stride
, 10*stride
};
1625 const int offset
= y_offset
[i
];
1626 const int z0
= block
[offset
+stride
*0] + block
[offset
+stride
*4];
1627 const int z1
= block
[offset
+stride
*0] - block
[offset
+stride
*4];
1628 const int z2
= block
[offset
+stride
*1] - block
[offset
+stride
*5];
1629 const int z3
= block
[offset
+stride
*1] + block
[offset
+stride
*5];
1638 const int offset
= x_offset
[i
];
1639 const int z0
= temp
[4*0+i
] + temp
[4*2+i
];
1640 const int z1
= temp
[4*0+i
] - temp
[4*2+i
];
1641 const int z2
= temp
[4*1+i
] - temp
[4*3+i
];
1642 const int z3
= temp
[4*1+i
] + temp
[4*3+i
];
1644 block
[stride
*0 +offset
]= (z0
+ z3
)>>1;
1645 block
[stride
*2 +offset
]= (z1
+ z2
)>>1;
1646 block
[stride
*8 +offset
]= (z1
- z2
)>>1;
1647 block
[stride
*10+offset
]= (z0
- z3
)>>1;
1655 static void chroma_dc_dequant_idct_c(DCTELEM
*block
, int qp
){
1656 const int qmul
= dequant_coeff
[qp
][0];
1657 const int stride
= 16*2;
1658 const int xStride
= 16;
1661 a
= block
[stride
*0 + xStride
*0];
1662 b
= block
[stride
*0 + xStride
*1];
1663 c
= block
[stride
*1 + xStride
*0];
1664 d
= block
[stride
*1 + xStride
*1];
1671 block
[stride
*0 + xStride
*0]= ((a
+c
)*qmul
+ 0)>>1;
1672 block
[stride
*0 + xStride
*1]= ((e
+b
)*qmul
+ 0)>>1;
1673 block
[stride
*1 + xStride
*0]= ((a
-c
)*qmul
+ 0)>>1;
1674 block
[stride
*1 + xStride
*1]= ((e
-b
)*qmul
+ 0)>>1;
1678 static void chroma_dc_dct_c(DCTELEM
*block
){
1679 const int stride
= 16*2;
1680 const int xStride
= 16;
1683 a
= block
[stride
*0 + xStride
*0];
1684 b
= block
[stride
*0 + xStride
*1];
1685 c
= block
[stride
*1 + xStride
*0];
1686 d
= block
[stride
*1 + xStride
*1];
1693 block
[stride
*0 + xStride
*0]= (a
+c
);
1694 block
[stride
*0 + xStride
*1]= (e
+b
);
1695 block
[stride
*1 + xStride
*0]= (a
-c
);
1696 block
[stride
*1 + xStride
*1]= (e
-b
);
1701 * gets the chroma qp.
1703 static inline int get_chroma_qp(int chroma_qp_index_offset
, int qscale
){
1705 return chroma_qp
[clip(qscale
+ chroma_qp_index_offset
, 0, 51)];
1710 static void h264_diff_dct_c(DCTELEM
*block
, uint8_t *src1
, uint8_t *src2
, int stride
){
1712 //FIXME try int temp instead of block
1715 const int d0
= src1
[0 + i
*stride
] - src2
[0 + i
*stride
];
1716 const int d1
= src1
[1 + i
*stride
] - src2
[1 + i
*stride
];
1717 const int d2
= src1
[2 + i
*stride
] - src2
[2 + i
*stride
];
1718 const int d3
= src1
[3 + i
*stride
] - src2
[3 + i
*stride
];
1719 const int z0
= d0
+ d3
;
1720 const int z3
= d0
- d3
;
1721 const int z1
= d1
+ d2
;
1722 const int z2
= d1
- d2
;
1724 block
[0 + 4*i
]= z0
+ z1
;
1725 block
[1 + 4*i
]= 2*z3
+ z2
;
1726 block
[2 + 4*i
]= z0
- z1
;
1727 block
[3 + 4*i
]= z3
- 2*z2
;
1731 const int z0
= block
[0*4 + i
] + block
[3*4 + i
];
1732 const int z3
= block
[0*4 + i
] - block
[3*4 + i
];
1733 const int z1
= block
[1*4 + i
] + block
[2*4 + i
];
1734 const int z2
= block
[1*4 + i
] - block
[2*4 + i
];
1736 block
[0*4 + i
]= z0
+ z1
;
1737 block
[1*4 + i
]= 2*z3
+ z2
;
1738 block
[2*4 + i
]= z0
- z1
;
1739 block
[3*4 + i
]= z3
- 2*z2
;
1744 //FIXME need to check that this doesnt overflow signed 32 bit for low qp, i am not sure, it's very close
1745 //FIXME check that gcc inlines this (and optimizes intra & seperate_dc stuff away)
1746 static inline int quantize_c(DCTELEM
*block
, uint8_t *scantable
, int qscale
, int intra
, int seperate_dc
){
1748 const int * const quant_table
= quant_coeff
[qscale
];
1749 const int bias
= intra ?
(1<<QUANT_SHIFT
)/3 : (1<<QUANT_SHIFT
)/6;
1750 const unsigned int threshold1
= (1<<QUANT_SHIFT
) - bias
- 1;
1751 const unsigned int threshold2
= (threshold1
<<1);
1757 const int dc_bias
= intra ?
(1<<(QUANT_SHIFT
-2))/3 : (1<<(QUANT_SHIFT
-2))/6;
1758 const unsigned int dc_threshold1
= (1<<(QUANT_SHIFT
-2)) - dc_bias
- 1;
1759 const unsigned int dc_threshold2
= (dc_threshold1
<<1);
1761 int level
= block
[0]*quant_coeff
[qscale
+18][0];
1762 if(((unsigned)(level
+dc_threshold1
))>dc_threshold2
){
1764 level
= (dc_bias
+ level
)>>(QUANT_SHIFT
-2);
1767 level
= (dc_bias
- level
)>>(QUANT_SHIFT
-2);
1770 // last_non_zero = i;
1775 const int dc_bias
= intra ?
(1<<(QUANT_SHIFT
+1))/3 : (1<<(QUANT_SHIFT
+1))/6;
1776 const unsigned int dc_threshold1
= (1<<(QUANT_SHIFT
+1)) - dc_bias
- 1;
1777 const unsigned int dc_threshold2
= (dc_threshold1
<<1);
1779 int level
= block
[0]*quant_table
[0];
1780 if(((unsigned)(level
+dc_threshold1
))>dc_threshold2
){
1782 level
= (dc_bias
+ level
)>>(QUANT_SHIFT
+1);
1785 level
= (dc_bias
- level
)>>(QUANT_SHIFT
+1);
1788 // last_non_zero = i;
1801 const int j
= scantable
[i
];
1802 int level
= block
[j
]*quant_table
[j
];
1804 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
1805 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
1806 if(((unsigned)(level
+threshold1
))>threshold2
){
1808 level
= (bias
+ level
)>>QUANT_SHIFT
;
1811 level
= (bias
- level
)>>QUANT_SHIFT
;
1820 return last_non_zero
;
static void pred4x4_vertical_c(uint8_t *src, uint8_t *topright, int stride){
    /* 4x4 vertical intra prediction: replicate the 4 pixels directly above
     * the block into all four of its rows. topright is unused. */
    const uint32_t top = ((uint32_t*)(src - stride))[0];
    int i;

    for(i = 0; i < 4; i++)
        ((uint32_t*)(src + i*stride))[0] = top;
}
static void pred4x4_horizontal_c(uint8_t *src, uint8_t *topright, int stride){
    /* 4x4 horizontal intra prediction: fill each row with the pixel to its
     * left, replicated across the row. topright is unused. */
    int i;

    for(i = 0; i < 4; i++){
        /* unsigned constant keeps the byte-replication multiply well defined */
        const uint32_t v = src[-1 + i*stride] * 0x01010101U;
        ((uint32_t*)(src + i*stride))[0] = v;
    }
}
static void pred4x4_dc_c(uint8_t *src, uint8_t *topright, int stride){
    /* 4x4 DC intra prediction: rounded average of the 4 top and 4 left
     * neighbours, replicated over the whole block. topright is unused. */
    int sum = 4;   /* rounding term for the >>3 */
    uint32_t fill;
    int i;

    for(i = 0; i < 4; i++)
        sum += src[i - stride] + src[-1 + i*stride];
    fill = (uint32_t)(sum >> 3) * 0x01010101U;

    for(i = 0; i < 4; i++)
        ((uint32_t*)(src + i*stride))[0] = fill;
}
static void pred4x4_left_dc_c(uint8_t *src, uint8_t *topright, int stride){
    /* 4x4 DC prediction from the left edge only (top row unavailable):
     * rounded average of the four left neighbours. topright is unused. */
    const int dc = (src[-1+0*stride] + src[-1+1*stride]
                  + src[-1+2*stride] + src[-1+3*stride] + 2) >> 2;
    const uint32_t fill = (uint32_t)dc * 0x01010101U;
    int i;

    for(i = 0; i < 4; i++)
        ((uint32_t*)(src + i*stride))[0] = fill;
}
static void pred4x4_top_dc_c(uint8_t *src, uint8_t *topright, int stride){
    /* 4x4 DC prediction from the top edge only (left column unavailable):
     * rounded average of the four pixels above. topright is unused. */
    const int dc = (src[0-stride] + src[1-stride]
                  + src[2-stride] + src[3-stride] + 2) >> 2;
    const uint32_t fill = (uint32_t)dc * 0x01010101U;
    int i;

    for(i = 0; i < 4; i++)
        ((uint32_t*)(src + i*stride))[0] = fill;
}
static void pred4x4_128_dc_c(uint8_t *src, uint8_t *topright, int stride){
    /* 4x4 DC prediction when no neighbours are available: fill the block
     * with the constant mid-grey value 128. topright is unused. */
    int i;

    for(i = 0; i < 4; i++)
        ((uint32_t*)(src + i*stride))[0] = 128U * 0x01010101U;
}
/* Load the four top-right neighbour pixels (t4..t7) from `topright`. */
#define LOAD_TOP_RIGHT_EDGE\
    const int t4= topright[0];\
    const int t5= topright[1];\
    const int t6= topright[2];\
    const int t7= topright[3];\

/* Load the four left-edge neighbour pixels (l0..l3) from the column at src[-1]. */
#define LOAD_LEFT_EDGE\
    const int l0= src[-1+0*stride];\
    const int l1= src[-1+1*stride];\
    const int l2= src[-1+2*stride];\
    const int l3= src[-1+3*stride];\

/* Load the four top-edge neighbour pixels (t0..t3) from the row at src[-stride]. */
#define LOAD_TOP_EDGE\
    const int t0= src[ 0-1*stride];\
    const int t1= src[ 1-1*stride];\
    const int t2= src[ 2-1*stride];\
    const int t3= src[ 3-1*stride];\

1892 static void pred4x4_down_right_c(uint8_t *src, uint8_t *topright, int stride){
1893 const int lt
= src
[-1-1*stride
];
1897 src
[0+3*stride
]=(l3
+ 2*l2
+ l1
+ 2)>>2;
1899 src
[1+3*stride
]=(l2
+ 2*l1
+ l0
+ 2)>>2;
1902 src
[2+3*stride
]=(l1
+ 2*l0
+ lt
+ 2)>>2;
1906 src
[3+3*stride
]=(l0
+ 2*lt
+ t0
+ 2)>>2;
1909 src
[3+2*stride
]=(lt
+ 2*t0
+ t1
+ 2)>>2;
1911 src
[3+1*stride
]=(t0
+ 2*t1
+ t2
+ 2)>>2;
1912 src
[3+0*stride
]=(t1
+ 2*t2
+ t3
+ 2)>>2;
1915 static void pred4x4_down_left_c(uint8_t *src
, uint8_t *topright
, int stride
){
1920 src
[0+0*stride
]=(t0
+ t2
+ 2*t1
+ 2)>>2;
1922 src
[0+1*stride
]=(t1
+ t3
+ 2*t2
+ 2)>>2;
1925 src
[0+2*stride
]=(t2
+ t4
+ 2*t3
+ 2)>>2;
1929 src
[0+3*stride
]=(t3
+ t5
+ 2*t4
+ 2)>>2;
1932 src
[1+3*stride
]=(t4
+ t6
+ 2*t5
+ 2)>>2;
1934 src
[2+3*stride
]=(t5
+ t7
+ 2*t6
+ 2)>>2;
1935 src
[3+3*stride
]=(t6
+ 3*t7
+ 2)>>2;
1938 static void pred4x4_vertical_right_c(uint8_t *src
, uint8_t *topright
, int stride
){
1939 const int lt
= src
[-1-1*stride
];
1942 const __attribute__((unused
)) int unu
= l3
;
1945 src
[1+2*stride
]=(lt
+ t0
+ 1)>>1;
1947 src
[2+2*stride
]=(t0
+ t1
+ 1)>>1;
1949 src
[3+2*stride
]=(t1
+ t2
+ 1)>>1;
1950 src
[3+0*stride
]=(t2
+ t3
+ 1)>>1;
1952 src
[1+3*stride
]=(l0
+ 2*lt
+ t0
+ 2)>>2;
1954 src
[2+3*stride
]=(lt
+ 2*t0
+ t1
+ 2)>>2;
1956 src
[3+3*stride
]=(t0
+ 2*t1
+ t2
+ 2)>>2;
1957 src
[3+1*stride
]=(t1
+ 2*t2
+ t3
+ 2)>>2;
1958 src
[0+2*stride
]=(lt
+ 2*l0
+ l1
+ 2)>>2;
1959 src
[0+3*stride
]=(l0
+ 2*l1
+ l2
+ 2)>>2;
1962 static void pred4x4_vertical_left_c(uint8_t *src
, uint8_t *topright
, int stride
){
1965 const __attribute__((unused
)) int unu
= t7
;
1967 src
[0+0*stride
]=(t0
+ t1
+ 1)>>1;
1969 src
[0+2*stride
]=(t1
+ t2
+ 1)>>1;
1971 src
[1+2*stride
]=(t2
+ t3
+ 1)>>1;
1973 src
[2+2*stride
]=(t3
+ t4
+ 1)>>1;
1974 src
[3+2*stride
]=(t4
+ t5
+ 1)>>1;
1975 src
[0+1*stride
]=(t0
+ 2*t1
+ t2
+ 2)>>2;
1977 src
[0+3*stride
]=(t1
+ 2*t2
+ t3
+ 2)>>2;
1979 src
[1+3*stride
]=(t2
+ 2*t3
+ t4
+ 2)>>2;
1981 src
[2+3*stride
]=(t3
+ 2*t4
+ t5
+ 2)>>2;
1982 src
[3+3*stride
]=(t4
+ 2*t5
+ t6
+ 2)>>2;
1985 static void pred4x4_horizontal_up_c(uint8_t *src
, uint8_t *topright
, int stride
){
1988 src
[0+0*stride
]=(l0
+ l1
+ 1)>>1;
1989 src
[1+0*stride
]=(l0
+ 2*l1
+ l2
+ 2)>>2;
1991 src
[0+1*stride
]=(l1
+ l2
+ 1)>>1;
1993 src
[1+1*stride
]=(l1
+ 2*l2
+ l3
+ 2)>>2;
1995 src
[0+2*stride
]=(l2
+ l3
+ 1)>>1;
1997 src
[1+2*stride
]=(l2
+ 2*l3
+ l3
+ 2)>>2;
2006 static void pred4x4_horizontal_down_c(uint8_t *src
, uint8_t *topright
, int stride
){
2007 const int lt
= src
[-1-1*stride
];
2010 const __attribute__((unused
)) int unu
= t3
;
2013 src
[2+1*stride
]=(lt
+ l0
+ 1)>>1;
2015 src
[3+1*stride
]=(l0
+ 2*lt
+ t0
+ 2)>>2;
2016 src
[2+0*stride
]=(lt
+ 2*t0
+ t1
+ 2)>>2;
2017 src
[3+0*stride
]=(t0
+ 2*t1
+ t2
+ 2)>>2;
2019 src
[2+2*stride
]=(l0
+ l1
+ 1)>>1;
2021 src
[3+2*stride
]=(lt
+ 2*l0
+ l1
+ 2)>>2;
2023 src
[2+3*stride
]=(l1
+ l2
+ 1)>>1;
2025 src
[3+3*stride
]=(l0
+ 2*l1
+ l2
+ 2)>>2;
2026 src
[0+3*stride
]=(l2
+ l3
+ 1)>>1;
2027 src
[1+3*stride
]=(l1
+ 2*l2
+ l3
+ 2)>>2;
static void pred16x16_vertical_c(uint8_t *src, int stride){
    /* 16x16 vertical intra prediction: copy the 16 pixels above the block
     * into every one of its 16 rows, four 32-bit words per row. */
    const uint32_t a = ((uint32_t*)(src - stride))[0];
    const uint32_t b = ((uint32_t*)(src - stride))[1];
    const uint32_t c = ((uint32_t*)(src - stride))[2];
    const uint32_t d = ((uint32_t*)(src - stride))[3];
    int i;

    for(i = 0; i < 16; i++){
        uint32_t * const row = (uint32_t*)(src + i*stride);
        row[0] = a;
        row[1] = b;
        row[2] = c;
        row[3] = d;
    }
}
static void pred16x16_horizontal_c(uint8_t *src, int stride){
    /* 16x16 horizontal intra prediction: each row is filled with its left
     * neighbour pixel replicated 16 times. */
    int i;

    for(i = 0; i < 16; i++){
        /* unsigned constant keeps the byte-replication multiply well defined */
        const uint32_t v = src[-1 + i*stride] * 0x01010101U;
        uint32_t * const row = (uint32_t*)(src + i*stride);
        row[0] = row[1] = row[2] = row[3] = v;
    }
}
2056 static void pred16x16_dc_c(uint8_t *src
, int stride
){
2060 dc
+= src
[-1+i
*stride
];
2067 dc
= 0x01010101*((dc
+ 16)>>5);
2069 for(i
=0; i
<16; i
++){
2070 ((uint32_t*)(src
+i
*stride
))[0]=
2071 ((uint32_t*)(src
+i
*stride
))[1]=
2072 ((uint32_t*)(src
+i
*stride
))[2]=
2073 ((uint32_t*)(src
+i
*stride
))[3]= dc
;
2077 static void pred16x16_left_dc_c(uint8_t *src
, int stride
){
2081 dc
+= src
[-1+i
*stride
];
2084 dc
= 0x01010101*((dc
+ 8)>>4);
2086 for(i
=0; i
<16; i
++){
2087 ((uint32_t*)(src
+i
*stride
))[0]=
2088 ((uint32_t*)(src
+i
*stride
))[1]=
2089 ((uint32_t*)(src
+i
*stride
))[2]=
2090 ((uint32_t*)(src
+i
*stride
))[3]= dc
;
2094 static void pred16x16_top_dc_c(uint8_t *src
, int stride
){
2100 dc
= 0x01010101*((dc
+ 8)>>4);
2102 for(i
=0; i
<16; i
++){
2103 ((uint32_t*)(src
+i
*stride
))[0]=
2104 ((uint32_t*)(src
+i
*stride
))[1]=
2105 ((uint32_t*)(src
+i
*stride
))[2]=
2106 ((uint32_t*)(src
+i
*stride
))[3]= dc
;
static void pred16x16_128_dc_c(uint8_t *src, int stride){
    /* 16x16 DC prediction when no neighbours are available: fill the
     * whole block with the constant mid-grey value 128. */
    int i;

    for(i = 0; i < 16; i++){
        uint32_t * const row = (uint32_t*)(src + i*stride);
        row[0] = row[1] = row[2] = row[3] = 0x01010101U * 128U;
    }
}
2121 static inline void pred16x16_plane_compat_c(uint8_t *src
, int stride
, const int svq3
){
2124 uint8_t *cm
= cropTbl
+ MAX_NEG_CROP
;
2125 const uint8_t * const src0
= src
+7-stride
;
2126 const uint8_t *src1
= src
+8*stride
-1;
2127 const uint8_t *src2
= src1
-2*stride
; // == src+6*stride-1;
2128 int H
= src0
[1] - src0
[-1];
2129 int V
= src1
[0] - src2
[ 0];
2130 for(k
=2; k
<=8; ++k
) {
2131 src1
+= stride
; src2
-= stride
;
2132 H
+= k
*(src0
[k
] - src0
[-k
]);
2133 V
+= k
*(src1
[0] - src2
[ 0]);
2136 H
= ( 5*(H
/4) ) / 16;
2137 V
= ( 5*(V
/4) ) / 16;
2139 /* required for 100% accuracy */
2140 i
= H
; H
= V
; V
= i
;
2142 H
= ( 5*H
+32 ) >> 6;
2143 V
= ( 5*V
+32 ) >> 6;
2146 a
= 16*(src1
[0] + src2
[16] + 1) - 7*(V
+H
);
2147 for(j
=16; j
>0; --j
) {
2150 for(i
=-16; i
<0; i
+=4) {
2151 src
[16+i
] = cm
[ (b
) >> 5 ];
2152 src
[17+i
] = cm
[ (b
+ H
) >> 5 ];
2153 src
[18+i
] = cm
[ (b
+2*H
) >> 5 ];
2154 src
[19+i
] = cm
[ (b
+3*H
) >> 5 ];
/** Intra 16x16 plane prediction (H.264 variant of the shared helper). */
static void pred16x16_plane_c(uint8_t *src, int stride){
    pred16x16_plane_compat_c(src, stride, 0);
}
/**
 * Intra 8x8 chroma vertical prediction: copy the 8 top-neighbor samples into
 * every row of the block (two 32-bit words per row).
 */
static void pred8x8_vertical_c(uint8_t *src, int stride){
    int row;
    const uint32_t left_word = ((uint32_t*)(src-stride))[0];
    const uint32_t right_word= ((uint32_t*)(src-stride))[1];

    for(row=0; row<8; row++){
        ((uint32_t*)(src+row*stride))[0]= left_word;
        ((uint32_t*)(src+row*stride))[1]= right_word;
    }
}
/**
 * Intra 8x8 chroma horizontal prediction: replicate each row's left neighbor
 * across the 8 pixels of that row.
 * Fix: unsigned splat constant — the original signed 0x01010101 multiply
 * overflows (UB) for samples >= 128.
 */
static void pred8x8_horizontal_c(uint8_t *src, int stride){
    int i;

    for(i=0; i<8; i++){
        const uint32_t v= src[-1+i*stride]*0x01010101u;

        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]= v;
    }
}
/**
 * Intra 8x8 chroma constant-DC prediction: no neighbors available, fill the
 * block with mid-gray (128). Kept as two loops (rows 0-3, then 4-7) to
 * mirror the original store order.
 */
static void pred8x8_128_dc_c(uint8_t *src, int stride){
    int row;

    for(row=0; row<4; row++){
        uint32_t *line= (uint32_t*)(src + row*stride);
        line[0]= line[1]= 0x01010101U*128U;
    }
    for(row=4; row<8; row++){
        uint32_t *line= (uint32_t*)(src + row*stride);
        line[0]= line[1]= 0x01010101U*128U;
    }
}
/**
 * Intra 8x8 chroma left-DC prediction: rows 0-3 get the rounded average of
 * left neighbors 0-3, rows 4-7 the average of left neighbors 4-7
 * ((sum + 2) >> 2 each). Used when the top row is unavailable.
 * Fix: unsigned accumulators/splat avoid signed-overflow UB for averages >= 128.
 */
static void pred8x8_left_dc_c(uint8_t *src, int stride){
    int i;
    uint32_t dc0, dc2;

    dc0=dc2=0;
    for(i=0; i<4; i++){
        dc0+= src[-1+i*stride];
        dc2+= src[-1+(i+4)*stride];
    }
    dc0= 0x01010101u*((dc0 + 2)>>2);
    dc2= 0x01010101u*((dc2 + 2)>>2);

    for(i=0; i<4; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]= dc0;
    }
    for(i=4; i<8; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]= dc2;
    }
}
/**
 * Intra 8x8 chroma top-DC prediction: columns 0-3 get the rounded average of
 * top neighbors 0-3, columns 4-7 the average of top neighbors 4-7
 * ((sum + 2) >> 2 each), for all 8 rows. Used when the left column is
 * unavailable.
 * Fix: unsigned accumulators/splat avoid signed-overflow UB for averages >= 128.
 */
static void pred8x8_top_dc_c(uint8_t *src, int stride){
    int i;
    uint32_t dc0, dc1;

    dc0=dc1=0;
    for(i=0; i<4; i++){
        dc0+= src[i-stride];
        dc1+= src[4+i-stride];
    }
    dc0= 0x01010101u*((dc0 + 2)>>2);
    dc1= 0x01010101u*((dc1 + 2)>>2);

    for(i=0; i<4; i++){
        ((uint32_t*)(src+i*stride))[0]= dc0;
        ((uint32_t*)(src+i*stride))[1]= dc1;
    }
    for(i=4; i<8; i++){
        ((uint32_t*)(src+i*stride))[0]= dc0;
        ((uint32_t*)(src+i*stride))[1]= dc1;
    }
}
/**
 * Intra 8x8 chroma DC prediction with all neighbors available. Each 4x4
 * quadrant gets its own DC:
 *   top-left:     average of left[0..3] and top[0..3]   ((sum + 4) >> 3)
 *   top-right:    average of top[4..7]                  ((sum + 2) >> 2)
 *   bottom-left:  average of left[4..7]                 ((sum + 2) >> 2)
 *   bottom-right: average of top[4..7] and left[4..7]   ((sum + 4) >> 3)
 * Fix: unsigned accumulators/splat avoid signed-overflow UB for averages >= 128.
 */
static void pred8x8_dc_c(uint8_t *src, int stride){
    int i;
    uint32_t dc0, dc1, dc2, dc3;

    dc0=dc1=dc2=0;
    for(i=0; i<4; i++){
        dc0+= src[-1+i*stride] + src[i-stride];
        dc1+= src[4+i-stride];
        dc2+= src[-1+(i+4)*stride];
    }
    /* dc3 must use the raw sums, so compute it before dc1/dc2 are splatted */
    dc3= 0x01010101u*((dc1 + dc2 + 4)>>3);
    dc0= 0x01010101u*((dc0 + 4)>>3);
    dc1= 0x01010101u*((dc1 + 2)>>2);
    dc2= 0x01010101u*((dc2 + 2)>>2);

    for(i=0; i<4; i++){
        ((uint32_t*)(src+i*stride))[0]= dc0;
        ((uint32_t*)(src+i*stride))[1]= dc1;
    }
    for(i=4; i<8; i++){
        ((uint32_t*)(src+i*stride))[0]= dc2;
        ((uint32_t*)(src+i*stride))[1]= dc3;
    }
}
2268 static void pred8x8_plane_c(uint8_t *src
, int stride
){
2271 uint8_t *cm
= cropTbl
+ MAX_NEG_CROP
;
2272 const uint8_t * const src0
= src
+3-stride
;
2273 const uint8_t *src1
= src
+4*stride
-1;
2274 const uint8_t *src2
= src1
-2*stride
; // == src+2*stride-1;
2275 int H
= src0
[1] - src0
[-1];
2276 int V
= src1
[0] - src2
[ 0];
2277 for(k
=2; k
<=4; ++k
) {
2278 src1
+= stride
; src2
-= stride
;
2279 H
+= k
*(src0
[k
] - src0
[-k
]);
2280 V
+= k
*(src1
[0] - src2
[ 0]);
2282 H
= ( 17*H
+16 ) >> 5;
2283 V
= ( 17*V
+16 ) >> 5;
2285 a
= 16*(src1
[0] + src2
[8]+1) - 3*(V
+H
);
2286 for(j
=8; j
>0; --j
) {
2289 src
[0] = cm
[ (b
) >> 5 ];
2290 src
[1] = cm
[ (b
+ H
) >> 5 ];
2291 src
[2] = cm
[ (b
+2*H
) >> 5 ];
2292 src
[3] = cm
[ (b
+3*H
) >> 5 ];
2293 src
[4] = cm
[ (b
+4*H
) >> 5 ];
2294 src
[5] = cm
[ (b
+5*H
) >> 5 ];
2295 src
[6] = cm
[ (b
+6*H
) >> 5 ];
2296 src
[7] = cm
[ (b
+7*H
) >> 5 ];
2301 static inline void mc_dir_part(H264Context
*h
, Picture
*pic
, int n
, int square
, int chroma_height
, int delta
, int list
,
2302 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
2303 int src_x_offset
, int src_y_offset
,
2304 qpel_mc_func
*qpix_op
, h264_chroma_mc_func chroma_op
){
2305 MpegEncContext
* const s
= &h
->s
;
2306 const int mx
= h
->mv_cache
[list
][ scan8
[n
] ][0] + src_x_offset
*8;
2307 const int my
= h
->mv_cache
[list
][ scan8
[n
] ][1] + src_y_offset
*8;
2308 const int luma_xy
= (mx
&3) + ((my
&3)<<2);
2309 uint8_t * src_y
= pic
->data
[0] + (mx
>>2) + (my
>>2)*s
->linesize
;
2310 uint8_t * src_cb
= pic
->data
[1] + (mx
>>3) + (my
>>3)*s
->uvlinesize
;
2311 uint8_t * src_cr
= pic
->data
[2] + (mx
>>3) + (my
>>3)*s
->uvlinesize
;
2312 int extra_width
= (s
->flags
&CODEC_FLAG_EMU_EDGE
) ?
0 : 16; //FIXME increase edge?, IMHO not worth it
2313 int extra_height
= extra_width
;
2315 const int full_mx
= mx
>>2;
2316 const int full_my
= my
>>2;
2318 assert(pic
->data
[0]);
2320 if(mx
&7) extra_width
-= 3;
2321 if(my
&7) extra_height
-= 3;
2323 if( full_mx
< 0-extra_width
2324 || full_my
< 0-extra_height
2325 || full_mx
+ 16/*FIXME*/ > s
->width
+ extra_width
2326 || full_my
+ 16/*FIXME*/ > s
->height
+ extra_height
){
2327 ff_emulated_edge_mc(s
->edge_emu_buffer
, src_y
- 2 - 2*s
->linesize
, s
->linesize
, 16+5, 16+5/*FIXME*/, full_mx
-2, full_my
-2, s
->width
, s
->height
);
2328 src_y
= s
->edge_emu_buffer
+ 2 + 2*s
->linesize
;
2332 qpix_op
[luma_xy
](dest_y
, src_y
, s
->linesize
); //FIXME try variable height perhaps?
2334 qpix_op
[luma_xy
](dest_y
+ delta
, src_y
+ delta
, s
->linesize
);
2337 if(s
->flags
&CODEC_FLAG_GRAY
) return;
2340 ff_emulated_edge_mc(s
->edge_emu_buffer
, src_cb
, s
->uvlinesize
, 9, 9/*FIXME*/, (mx
>>3), (my
>>3), s
->width
>>1, s
->height
>>1);
2341 src_cb
= s
->edge_emu_buffer
;
2343 chroma_op(dest_cb
, src_cb
, s
->uvlinesize
, chroma_height
, mx
&7, my
&7);
2346 ff_emulated_edge_mc(s
->edge_emu_buffer
, src_cr
, s
->uvlinesize
, 9, 9/*FIXME*/, (mx
>>3), (my
>>3), s
->width
>>1, s
->height
>>1);
2347 src_cr
= s
->edge_emu_buffer
;
2349 chroma_op(dest_cr
, src_cr
, s
->uvlinesize
, chroma_height
, mx
&7, my
&7);
2352 static inline void mc_part_std(H264Context
*h
, int n
, int square
, int chroma_height
, int delta
,
2353 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
2354 int x_offset
, int y_offset
,
2355 qpel_mc_func
*qpix_put
, h264_chroma_mc_func chroma_put
,
2356 qpel_mc_func
*qpix_avg
, h264_chroma_mc_func chroma_avg
,
2357 int list0
, int list1
){
2358 MpegEncContext
* const s
= &h
->s
;
2359 qpel_mc_func
*qpix_op
= qpix_put
;
2360 h264_chroma_mc_func chroma_op
= chroma_put
;
2362 dest_y
+= 2*x_offset
+ 2*y_offset
*s
-> linesize
;
2363 dest_cb
+= x_offset
+ y_offset
*s
->uvlinesize
;
2364 dest_cr
+= x_offset
+ y_offset
*s
->uvlinesize
;
2365 x_offset
+= 8*s
->mb_x
;
2366 y_offset
+= 8*s
->mb_y
;
2369 Picture
*ref
= &h
->ref_list
[0][ h
->ref_cache
[0][ scan8
[n
] ] ];
2370 mc_dir_part(h
, ref
, n
, square
, chroma_height
, delta
, 0,
2371 dest_y
, dest_cb
, dest_cr
, x_offset
, y_offset
,
2372 qpix_op
, chroma_op
);
2375 chroma_op
= chroma_avg
;
2379 Picture
*ref
= &h
->ref_list
[1][ h
->ref_cache
[1][ scan8
[n
] ] ];
2380 mc_dir_part(h
, ref
, n
, square
, chroma_height
, delta
, 1,
2381 dest_y
, dest_cb
, dest_cr
, x_offset
, y_offset
,
2382 qpix_op
, chroma_op
);
2386 static inline void mc_part_weighted(H264Context
*h
, int n
, int square
, int chroma_height
, int delta
,
2387 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
2388 int x_offset
, int y_offset
,
2389 qpel_mc_func
*qpix_put
, h264_chroma_mc_func chroma_put
,
2390 h264_weight_func luma_weight_op
, h264_weight_func chroma_weight_op
,
2391 h264_biweight_func luma_weight_avg
, h264_biweight_func chroma_weight_avg
,
2392 int list0
, int list1
){
2393 MpegEncContext
* const s
= &h
->s
;
2395 dest_y
+= 2*x_offset
+ 2*y_offset
*s
-> linesize
;
2396 dest_cb
+= x_offset
+ y_offset
*s
->uvlinesize
;
2397 dest_cr
+= x_offset
+ y_offset
*s
->uvlinesize
;
2398 x_offset
+= 8*s
->mb_x
;
2399 y_offset
+= 8*s
->mb_y
;
2402 /* don't optimize for luma-only case, since B-frames usually
2403 * use implicit weights => chroma too. */
2404 uint8_t *tmp_cb
= s
->obmc_scratchpad
;
2405 uint8_t *tmp_cr
= tmp_cb
+ 8*s
->uvlinesize
;
2406 uint8_t *tmp_y
= tmp_cr
+ 8*s
->uvlinesize
;
2407 int refn0
= h
->ref_cache
[0][ scan8
[n
] ];
2408 int refn1
= h
->ref_cache
[1][ scan8
[n
] ];
2410 mc_dir_part(h
, &h
->ref_list
[0][refn0
], n
, square
, chroma_height
, delta
, 0,
2411 dest_y
, dest_cb
, dest_cr
,
2412 x_offset
, y_offset
, qpix_put
, chroma_put
);
2413 mc_dir_part(h
, &h
->ref_list
[1][refn1
], n
, square
, chroma_height
, delta
, 1,
2414 tmp_y
, tmp_cb
, tmp_cr
,
2415 x_offset
, y_offset
, qpix_put
, chroma_put
);
2417 if(h
->use_weight
== 2){
2418 int weight0
= h
->implicit_weight
[refn0
][refn1
];
2419 int weight1
= 64 - weight0
;
2420 luma_weight_avg( dest_y
, tmp_y
, s
-> linesize
, 5, weight0
, weight1
, 0, 0);
2421 chroma_weight_avg(dest_cb
, tmp_cb
, s
->uvlinesize
, 5, weight0
, weight1
, 0, 0);
2422 chroma_weight_avg(dest_cr
, tmp_cr
, s
->uvlinesize
, 5, weight0
, weight1
, 0, 0);
2424 luma_weight_avg(dest_y
, tmp_y
, s
->linesize
, h
->luma_log2_weight_denom
,
2425 h
->luma_weight
[0][refn0
], h
->luma_weight
[1][refn1
],
2426 h
->luma_offset
[0][refn0
], h
->luma_offset
[1][refn1
]);
2427 chroma_weight_avg(dest_cb
, tmp_cb
, s
->uvlinesize
, h
->chroma_log2_weight_denom
,
2428 h
->chroma_weight
[0][refn0
][0], h
->chroma_weight
[1][refn1
][0],
2429 h
->chroma_offset
[0][refn0
][0], h
->chroma_offset
[1][refn1
][0]);
2430 chroma_weight_avg(dest_cr
, tmp_cr
, s
->uvlinesize
, h
->chroma_log2_weight_denom
,
2431 h
->chroma_weight
[0][refn0
][1], h
->chroma_weight
[1][refn1
][1],
2432 h
->chroma_offset
[0][refn0
][1], h
->chroma_offset
[1][refn1
][1]);
2435 int list
= list1 ?
1 : 0;
2436 int refn
= h
->ref_cache
[list
][ scan8
[n
] ];
2437 Picture
*ref
= &h
->ref_list
[list
][refn
];
2438 mc_dir_part(h
, ref
, n
, square
, chroma_height
, delta
, list
,
2439 dest_y
, dest_cb
, dest_cr
, x_offset
, y_offset
,
2440 qpix_put
, chroma_put
);
2442 luma_weight_op(dest_y
, s
->linesize
, h
->luma_log2_weight_denom
,
2443 h
->luma_weight
[list
][refn
], h
->luma_offset
[list
][refn
]);
2444 if(h
->use_weight_chroma
){
2445 chroma_weight_op(dest_cb
, s
->uvlinesize
, h
->chroma_log2_weight_denom
,
2446 h
->chroma_weight
[list
][refn
][0], h
->chroma_offset
[list
][refn
][0]);
2447 chroma_weight_op(dest_cr
, s
->uvlinesize
, h
->chroma_log2_weight_denom
,
2448 h
->chroma_weight
[list
][refn
][1], h
->chroma_offset
[list
][refn
][1]);
2453 static inline void mc_part(H264Context
*h
, int n
, int square
, int chroma_height
, int delta
,
2454 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
2455 int x_offset
, int y_offset
,
2456 qpel_mc_func
*qpix_put
, h264_chroma_mc_func chroma_put
,
2457 qpel_mc_func
*qpix_avg
, h264_chroma_mc_func chroma_avg
,
2458 h264_weight_func
*weight_op
, h264_biweight_func
*weight_avg
,
2459 int list0
, int list1
){
2460 if((h
->use_weight
==2 && list0
&& list1
2461 && (h
->implicit_weight
[ h
->ref_cache
[0][scan8
[n
]] ][ h
->ref_cache
[1][scan8
[n
]] ] != 32))
2462 || h
->use_weight
==1)
2463 mc_part_weighted(h
, n
, square
, chroma_height
, delta
, dest_y
, dest_cb
, dest_cr
,
2464 x_offset
, y_offset
, qpix_put
, chroma_put
,
2465 weight_op
[0], weight_op
[3], weight_avg
[0], weight_avg
[3], list0
, list1
);
2467 mc_part_std(h
, n
, square
, chroma_height
, delta
, dest_y
, dest_cb
, dest_cr
,
2468 x_offset
, y_offset
, qpix_put
, chroma_put
, qpix_avg
, chroma_avg
, list0
, list1
);
2471 static void hl_motion(H264Context
*h
, uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
2472 qpel_mc_func (*qpix_put
)[16], h264_chroma_mc_func (*chroma_put
),
2473 qpel_mc_func (*qpix_avg
)[16], h264_chroma_mc_func (*chroma_avg
),
2474 h264_weight_func
*weight_op
, h264_biweight_func
*weight_avg
){
2475 MpegEncContext
* const s
= &h
->s
;
2476 const int mb_xy
= s
->mb_x
+ s
->mb_y
*s
->mb_stride
;
2477 const int mb_type
= s
->current_picture
.mb_type
[mb_xy
];
2479 assert(IS_INTER(mb_type
));
2481 if(IS_16X16(mb_type
)){
2482 mc_part(h
, 0, 1, 8, 0, dest_y
, dest_cb
, dest_cr
, 0, 0,
2483 qpix_put
[0], chroma_put
[0], qpix_avg
[0], chroma_avg
[0],
2484 &weight_op
[0], &weight_avg
[0],
2485 IS_DIR(mb_type
, 0, 0), IS_DIR(mb_type
, 0, 1));
2486 }else if(IS_16X8(mb_type
)){
2487 mc_part(h
, 0, 0, 4, 8, dest_y
, dest_cb
, dest_cr
, 0, 0,
2488 qpix_put
[1], chroma_put
[0], qpix_avg
[1], chroma_avg
[0],
2489 &weight_op
[1], &weight_avg
[1],
2490 IS_DIR(mb_type
, 0, 0), IS_DIR(mb_type
, 0, 1));
2491 mc_part(h
, 8, 0, 4, 8, dest_y
, dest_cb
, dest_cr
, 0, 4,
2492 qpix_put
[1], chroma_put
[0], qpix_avg
[1], chroma_avg
[0],
2493 &weight_op
[1], &weight_avg
[1],
2494 IS_DIR(mb_type
, 1, 0), IS_DIR(mb_type
, 1, 1));
2495 }else if(IS_8X16(mb_type
)){
2496 mc_part(h
, 0, 0, 8, 8*s
->linesize
, dest_y
, dest_cb
, dest_cr
, 0, 0,
2497 qpix_put
[1], chroma_put
[1], qpix_avg
[1], chroma_avg
[1],
2498 &weight_op
[2], &weight_avg
[2],
2499 IS_DIR(mb_type
, 0, 0), IS_DIR(mb_type
, 0, 1));
2500 mc_part(h
, 4, 0, 8, 8*s
->linesize
, dest_y
, dest_cb
, dest_cr
, 4, 0,
2501 qpix_put
[1], chroma_put
[1], qpix_avg
[1], chroma_avg
[1],
2502 &weight_op
[2], &weight_avg
[2],
2503 IS_DIR(mb_type
, 1, 0), IS_DIR(mb_type
, 1, 1));
2507 assert(IS_8X8(mb_type
));
2510 const int sub_mb_type
= h
->sub_mb_type
[i
];
2512 int x_offset
= (i
&1)<<2;
2513 int y_offset
= (i
&2)<<1;
2515 if(IS_SUB_8X8(sub_mb_type
)){
2516 mc_part(h
, n
, 1, 4, 0, dest_y
, dest_cb
, dest_cr
, x_offset
, y_offset
,
2517 qpix_put
[1], chroma_put
[1], qpix_avg
[1], chroma_avg
[1],
2518 &weight_op
[3], &weight_avg
[3],
2519 IS_DIR(sub_mb_type
, 0, 0), IS_DIR(sub_mb_type
, 0, 1));
2520 }else if(IS_SUB_8X4(sub_mb_type
)){
2521 mc_part(h
, n
, 0, 2, 4, dest_y
, dest_cb
, dest_cr
, x_offset
, y_offset
,
2522 qpix_put
[2], chroma_put
[1], qpix_avg
[2], chroma_avg
[1],
2523 &weight_op
[4], &weight_avg
[4],
2524 IS_DIR(sub_mb_type
, 0, 0), IS_DIR(sub_mb_type
, 0, 1));
2525 mc_part(h
, n
+2, 0, 2, 4, dest_y
, dest_cb
, dest_cr
, x_offset
, y_offset
+2,
2526 qpix_put
[2], chroma_put
[1], qpix_avg
[2], chroma_avg
[1],
2527 &weight_op
[4], &weight_avg
[4],
2528 IS_DIR(sub_mb_type
, 0, 0), IS_DIR(sub_mb_type
, 0, 1));
2529 }else if(IS_SUB_4X8(sub_mb_type
)){
2530 mc_part(h
, n
, 0, 4, 4*s
->linesize
, dest_y
, dest_cb
, dest_cr
, x_offset
, y_offset
,
2531 qpix_put
[2], chroma_put
[2], qpix_avg
[2], chroma_avg
[2],
2532 &weight_op
[5], &weight_avg
[5],
2533 IS_DIR(sub_mb_type
, 0, 0), IS_DIR(sub_mb_type
, 0, 1));
2534 mc_part(h
, n
+1, 0, 4, 4*s
->linesize
, dest_y
, dest_cb
, dest_cr
, x_offset
+2, y_offset
,
2535 qpix_put
[2], chroma_put
[2], qpix_avg
[2], chroma_avg
[2],
2536 &weight_op
[5], &weight_avg
[5],
2537 IS_DIR(sub_mb_type
, 0, 0), IS_DIR(sub_mb_type
, 0, 1));
2540 assert(IS_SUB_4X4(sub_mb_type
));
2542 int sub_x_offset
= x_offset
+ 2*(j
&1);
2543 int sub_y_offset
= y_offset
+ (j
&2);
2544 mc_part(h
, n
+j
, 1, 2, 0, dest_y
, dest_cb
, dest_cr
, sub_x_offset
, sub_y_offset
,
2545 qpix_put
[2], chroma_put
[2], qpix_avg
[2], chroma_avg
[2],
2546 &weight_op
[6], &weight_avg
[6],
2547 IS_DIR(sub_mb_type
, 0, 0), IS_DIR(sub_mb_type
, 0, 1));
2554 static void decode_init_vlc(H264Context
*h
){
2555 static int done
= 0;
2561 init_vlc(&chroma_dc_coeff_token_vlc
, CHROMA_DC_COEFF_TOKEN_VLC_BITS
, 4*5,
2562 &chroma_dc_coeff_token_len
[0], 1, 1,
2563 &chroma_dc_coeff_token_bits
[0], 1, 1, 1);
2566 init_vlc(&coeff_token_vlc
[i
], COEFF_TOKEN_VLC_BITS
, 4*17,
2567 &coeff_token_len
[i
][0], 1, 1,
2568 &coeff_token_bits
[i
][0], 1, 1, 1);
2572 init_vlc(&chroma_dc_total_zeros_vlc
[i
], CHROMA_DC_TOTAL_ZEROS_VLC_BITS
, 4,
2573 &chroma_dc_total_zeros_len
[i
][0], 1, 1,
2574 &chroma_dc_total_zeros_bits
[i
][0], 1, 1, 1);
2576 for(i
=0; i
<15; i
++){
2577 init_vlc(&total_zeros_vlc
[i
], TOTAL_ZEROS_VLC_BITS
, 16,
2578 &total_zeros_len
[i
][0], 1, 1,
2579 &total_zeros_bits
[i
][0], 1, 1, 1);
2583 init_vlc(&run_vlc
[i
], RUN_VLC_BITS
, 7,
2584 &run_len
[i
][0], 1, 1,
2585 &run_bits
[i
][0], 1, 1, 1);
2587 init_vlc(&run7_vlc
, RUN7_VLC_BITS
, 16,
2588 &run_len
[6][0], 1, 1,
2589 &run_bits
[6][0], 1, 1, 1);
2594 * Sets the intra prediction function pointers.
2596 static void init_pred_ptrs(H264Context
*h
){
2597 // MpegEncContext * const s = &h->s;
2599 h
->pred4x4
[VERT_PRED
]= pred4x4_vertical_c
;
2600 h
->pred4x4
[HOR_PRED
]= pred4x4_horizontal_c
;
2601 h
->pred4x4
[DC_PRED
]= pred4x4_dc_c
;
2602 h
->pred4x4
[DIAG_DOWN_LEFT_PRED
]= pred4x4_down_left_c
;
2603 h
->pred4x4
[DIAG_DOWN_RIGHT_PRED
]= pred4x4_down_right_c
;
2604 h
->pred4x4
[VERT_RIGHT_PRED
]= pred4x4_vertical_right_c
;
2605 h
->pred4x4
[HOR_DOWN_PRED
]= pred4x4_horizontal_down_c
;
2606 h
->pred4x4
[VERT_LEFT_PRED
]= pred4x4_vertical_left_c
;
2607 h
->pred4x4
[HOR_UP_PRED
]= pred4x4_horizontal_up_c
;
2608 h
->pred4x4
[LEFT_DC_PRED
]= pred4x4_left_dc_c
;
2609 h
->pred4x4
[TOP_DC_PRED
]= pred4x4_top_dc_c
;
2610 h
->pred4x4
[DC_128_PRED
]= pred4x4_128_dc_c
;
2612 h
->pred8x8
[DC_PRED8x8
]= pred8x8_dc_c
;
2613 h
->pred8x8
[VERT_PRED8x8
]= pred8x8_vertical_c
;
2614 h
->pred8x8
[HOR_PRED8x8
]= pred8x8_horizontal_c
;
2615 h
->pred8x8
[PLANE_PRED8x8
]= pred8x8_plane_c
;
2616 h
->pred8x8
[LEFT_DC_PRED8x8
]= pred8x8_left_dc_c
;
2617 h
->pred8x8
[TOP_DC_PRED8x8
]= pred8x8_top_dc_c
;
2618 h
->pred8x8
[DC_128_PRED8x8
]= pred8x8_128_dc_c
;
2620 h
->pred16x16
[DC_PRED8x8
]= pred16x16_dc_c
;
2621 h
->pred16x16
[VERT_PRED8x8
]= pred16x16_vertical_c
;
2622 h
->pred16x16
[HOR_PRED8x8
]= pred16x16_horizontal_c
;
2623 h
->pred16x16
[PLANE_PRED8x8
]= pred16x16_plane_c
;
2624 h
->pred16x16
[LEFT_DC_PRED8x8
]= pred16x16_left_dc_c
;
2625 h
->pred16x16
[TOP_DC_PRED8x8
]= pred16x16_top_dc_c
;
2626 h
->pred16x16
[DC_128_PRED8x8
]= pred16x16_128_dc_c
;
2629 static void free_tables(H264Context
*h
){
2630 av_freep(&h
->intra4x4_pred_mode
);
2631 av_freep(&h
->chroma_pred_mode_table
);
2632 av_freep(&h
->cbp_table
);
2633 av_freep(&h
->mvd_table
[0]);
2634 av_freep(&h
->mvd_table
[1]);
2635 av_freep(&h
->direct_table
);
2636 av_freep(&h
->non_zero_count
);
2637 av_freep(&h
->slice_table_base
);
2638 av_freep(&h
->top_borders
[1]);
2639 av_freep(&h
->top_borders
[0]);
2640 h
->slice_table
= NULL
;
2642 av_freep(&h
->mb2b_xy
);
2643 av_freep(&h
->mb2b8_xy
);
2645 av_freep(&h
->s
.obmc_scratchpad
);
2650 * needs width/height
2652 static int alloc_tables(H264Context
*h
){
2653 MpegEncContext
* const s
= &h
->s
;
2654 const int big_mb_num
= s
->mb_stride
* (s
->mb_height
+1);
2657 CHECKED_ALLOCZ(h
->intra4x4_pred_mode
, big_mb_num
* 8 * sizeof(uint8_t))
2659 CHECKED_ALLOCZ(h
->non_zero_count
, big_mb_num
* 16 * sizeof(uint8_t))
2660 CHECKED_ALLOCZ(h
->slice_table_base
, big_mb_num
* sizeof(uint8_t))
2661 CHECKED_ALLOCZ(h
->top_borders
[0] , s
->mb_width
* (16+8+8) * sizeof(uint8_t))
2662 CHECKED_ALLOCZ(h
->top_borders
[1] , s
->mb_width
* (16+8+8) * sizeof(uint8_t))
2663 CHECKED_ALLOCZ(h
->cbp_table
, big_mb_num
* sizeof(uint16_t))
2665 if( h
->pps
.cabac
) {
2666 CHECKED_ALLOCZ(h
->chroma_pred_mode_table
, big_mb_num
* sizeof(uint8_t))
2667 CHECKED_ALLOCZ(h
->mvd_table
[0], 32*big_mb_num
* sizeof(uint16_t));
2668 CHECKED_ALLOCZ(h
->mvd_table
[1], 32*big_mb_num
* sizeof(uint16_t));
2669 CHECKED_ALLOCZ(h
->direct_table
, 32*big_mb_num
* sizeof(uint8_t));
2672 memset(h
->slice_table_base
, -1, big_mb_num
* sizeof(uint8_t));
2673 h
->slice_table
= h
->slice_table_base
+ s
->mb_stride
+ 1;
2675 CHECKED_ALLOCZ(h
->mb2b_xy
, big_mb_num
* sizeof(uint32_t));
2676 CHECKED_ALLOCZ(h
->mb2b8_xy
, big_mb_num
* sizeof(uint32_t));
2677 for(y
=0; y
<s
->mb_height
; y
++){
2678 for(x
=0; x
<s
->mb_width
; x
++){
2679 const int mb_xy
= x
+ y
*s
->mb_stride
;
2680 const int b_xy
= 4*x
+ 4*y
*h
->b_stride
;
2681 const int b8_xy
= 2*x
+ 2*y
*h
->b8_stride
;
2683 h
->mb2b_xy
[mb_xy
]= b_xy
;
2684 h
->mb2b8_xy
[mb_xy
]= b8_xy
;
2688 s
->obmc_scratchpad
= NULL
;
2696 static void common_init(H264Context
*h
){
2697 MpegEncContext
* const s
= &h
->s
;
2699 s
->width
= s
->avctx
->width
;
2700 s
->height
= s
->avctx
->height
;
2701 s
->codec_id
= s
->avctx
->codec
->id
;
2705 s
->unrestricted_mv
=1;
2706 s
->decode
=1; //FIXME
/* Decoder init callback. NOTE(review): the tail of this function (original
 * lines 2742-2750, including the return) is not visible in this chunk;
 * behavior past the extradata check cannot be confirmed from here. */
2709 static int decode_init(AVCodecContext
*avctx
){
2710 H264Context
*h
= avctx
->priv_data
;
2711 MpegEncContext
* const s
= &h
->s
;
2713 MPV_decode_defaults(s
);
/* output format and bug workarounds come straight from the caller context */
2718 s
->out_format
= FMT_H264
;
2719 s
->workaround_bugs
= avctx
->workaround_bugs
;
2722 // s->decode_mb= ff_h263_decode_mb;
2724 avctx
->pix_fmt
= PIX_FMT_YUV420P
;
/* when the C idct is in use the scan tables can be copied verbatim;
 * otherwise they are permuted with T() to match the idct's layout */
2726 if(s
->dsp
.h264_idct_add
== ff_h264_idct_add_c
){ //FIXME little ugly
2727 memcpy(h
->zigzag_scan
, zigzag_scan
, 16*sizeof(uint8_t));
2728 memcpy(h
-> field_scan
, field_scan
, 16*sizeof(uint8_t));
2731 for(i
=0; i
<16; i
++){
2732 #define T(x) (x>>2) | ((x<<2) & 0xF)
2733 h
->zigzag_scan
[i
] = T(zigzag_scan
[i
]);
2734 h
-> field_scan
[i
] = T( field_scan
[i
]);
/* extradata starting with byte 1 indicates AVC (length-prefixed) packaging
 * rather than Annex-B start codes — presumably sets an is_avc flag in the
 * (not visible) tail; confirm against the full file */
2740 if(avctx
->extradata_size
> 0 && avctx
->extradata
&&
2741 *(char *)avctx
->extradata
== 1){
2751 static void frame_start(H264Context
*h
){
2752 MpegEncContext
* const s
= &h
->s
;
2755 MPV_frame_start(s
, s
->avctx
);
2756 ff_er_frame_start(s
);
2758 assert(s
->linesize
&& s
->uvlinesize
);
2760 for(i
=0; i
<16; i
++){
2761 h
->block_offset
[i
]= 4*((scan8
[i
] - scan8
[0])&7) + 4*s
->linesize
*((scan8
[i
] - scan8
[0])>>3);
2762 h
->block_offset
[24+i
]= 4*((scan8
[i
] - scan8
[0])&7) + 8*s
->linesize
*((scan8
[i
] - scan8
[0])>>3);
2765 h
->block_offset
[16+i
]=
2766 h
->block_offset
[20+i
]= 4*((scan8
[i
] - scan8
[0])&7) + 4*s
->uvlinesize
*((scan8
[i
] - scan8
[0])>>3);
2767 h
->block_offset
[24+16+i
]=
2768 h
->block_offset
[24+20+i
]= 4*((scan8
[i
] - scan8
[0])&7) + 8*s
->uvlinesize
*((scan8
[i
] - scan8
[0])>>3);
2771 /* can't be in alloc_tables because linesize isn't known there.
2772 * FIXME: redo bipred weight to not require extra buffer? */
2773 if(!s
->obmc_scratchpad
)
2774 s
->obmc_scratchpad
= av_malloc(16*s
->linesize
+ 2*8*s
->uvlinesize
);
2776 // s->decode= (s->flags&CODEC_FLAG_PSNR) || !s->encoding || s->current_picture.reference /*|| h->contains_intra*/ || 1;
2779 static inline void backup_mb_border(H264Context
*h
, uint8_t *src_y
, uint8_t *src_cb
, uint8_t *src_cr
, int linesize
, int uvlinesize
){
2780 MpegEncContext
* const s
= &h
->s
;
2784 src_cb
-= uvlinesize
;
2785 src_cr
-= uvlinesize
;
2787 // There are two lines saved, the line above the the top macroblock of a pair,
2788 // and the line above the bottom macroblock
2789 h
->left_border
[0]= h
->top_borders
[0][s
->mb_x
][15];
2790 for(i
=1; i
<17; i
++){
2791 h
->left_border
[i
]= src_y
[15+i
* linesize
];
2794 *(uint64_t*)(h
->top_borders
[0][s
->mb_x
]+0)= *(uint64_t*)(src_y
+ 16*linesize
);
2795 *(uint64_t*)(h
->top_borders
[0][s
->mb_x
]+8)= *(uint64_t*)(src_y
+8+16*linesize
);
2797 if(!(s
->flags
&CODEC_FLAG_GRAY
)){
2798 h
->left_border
[17 ]= h
->top_borders
[0][s
->mb_x
][16+7];
2799 h
->left_border
[17+9]= h
->top_borders
[0][s
->mb_x
][24+7];
2801 h
->left_border
[i
+17 ]= src_cb
[7+i
*uvlinesize
];
2802 h
->left_border
[i
+17+9]= src_cr
[7+i
*uvlinesize
];
2804 *(uint64_t*)(h
->top_borders
[0][s
->mb_x
]+16)= *(uint64_t*)(src_cb
+8*uvlinesize
);
2805 *(uint64_t*)(h
->top_borders
[0][s
->mb_x
]+24)= *(uint64_t*)(src_cr
+8*uvlinesize
);
2809 static inline void xchg_mb_border(H264Context
*h
, uint8_t *src_y
, uint8_t *src_cb
, uint8_t *src_cr
, int linesize
, int uvlinesize
, int xchg
){
2810 MpegEncContext
* const s
= &h
->s
;
2813 int deblock_left
= (s
->mb_x
> 0);
2814 int deblock_top
= (s
->mb_y
> 0);
2816 src_y
-= linesize
+ 1;
2817 src_cb
-= uvlinesize
+ 1;
2818 src_cr
-= uvlinesize
+ 1;
2820 #define XCHG(a,b,t,xchg)\
2827 for(i
= !deblock_top
; i
<17; i
++){
2828 XCHG(h
->left_border
[i
], src_y
[i
* linesize
], temp8
, xchg
);
2833 XCHG(*(uint64_t*)(h
->top_borders
[0][s
->mb_x
]+0), *(uint64_t*)(src_y
+1), temp64
, xchg
);
2834 XCHG(*(uint64_t*)(h
->top_borders
[0][s
->mb_x
]+8), *(uint64_t*)(src_y
+9), temp64
, 1);
2837 if(!(s
->flags
&CODEC_FLAG_GRAY
)){
2839 for(i
= !deblock_top
; i
<9; i
++){
2840 XCHG(h
->left_border
[i
+17 ], src_cb
[i
*uvlinesize
], temp8
, xchg
);
2841 XCHG(h
->left_border
[i
+17+9], src_cr
[i
*uvlinesize
], temp8
, xchg
);
2845 XCHG(*(uint64_t*)(h
->top_borders
[0][s
->mb_x
]+16), *(uint64_t*)(src_cb
+1), temp64
, 1);
2846 XCHG(*(uint64_t*)(h
->top_borders
[0][s
->mb_x
]+24), *(uint64_t*)(src_cr
+1), temp64
, 1);
2851 static inline void backup_pair_border(H264Context
*h
, uint8_t *src_y
, uint8_t *src_cb
, uint8_t *src_cr
, int linesize
, int uvlinesize
){
2852 MpegEncContext
* const s
= &h
->s
;
2855 src_y
-= 2 * linesize
;
2856 src_cb
-= 2 * uvlinesize
;
2857 src_cr
-= 2 * uvlinesize
;
2859 // There are two lines saved, the line above the the top macroblock of a pair,
2860 // and the line above the bottom macroblock
2861 h
->left_border
[0]= h
->top_borders
[0][s
->mb_x
][15];
2862 h
->left_border
[1]= h
->top_borders
[1][s
->mb_x
][15];
2863 for(i
=2; i
<34; i
++){
2864 h
->left_border
[i
]= src_y
[15+i
* linesize
];
2867 *(uint64_t*)(h
->top_borders
[0][s
->mb_x
]+0)= *(uint64_t*)(src_y
+ 32*linesize
);
2868 *(uint64_t*)(h
->top_borders
[0][s
->mb_x
]+8)= *(uint64_t*)(src_y
+8+32*linesize
);
2869 *(uint64_t*)(h
->top_borders
[1][s
->mb_x
]+0)= *(uint64_t*)(src_y
+ 33*linesize
);
2870 *(uint64_t*)(h
->top_borders
[1][s
->mb_x
]+8)= *(uint64_t*)(src_y
+8+33*linesize
);
2872 if(!(s
->flags
&CODEC_FLAG_GRAY
)){
2873 h
->left_border
[34 ]= h
->top_borders
[0][s
->mb_x
][16+7];
2874 h
->left_border
[34+ 1]= h
->top_borders
[1][s
->mb_x
][16+7];
2875 h
->left_border
[34+18 ]= h
->top_borders
[0][s
->mb_x
][24+7];
2876 h
->left_border
[34+18+1]= h
->top_borders
[1][s
->mb_x
][24+7];
2877 for(i
=2; i
<18; i
++){
2878 h
->left_border
[i
+34 ]= src_cb
[7+i
*uvlinesize
];
2879 h
->left_border
[i
+34+18]= src_cr
[7+i
*uvlinesize
];
2881 *(uint64_t*)(h
->top_borders
[0][s
->mb_x
]+16)= *(uint64_t*)(src_cb
+16*uvlinesize
);
2882 *(uint64_t*)(h
->top_borders
[0][s
->mb_x
]+24)= *(uint64_t*)(src_cr
+16*uvlinesize
);
2883 *(uint64_t*)(h
->top_borders
[1][s
->mb_x
]+16)= *(uint64_t*)(src_cb
+17*uvlinesize
);
2884 *(uint64_t*)(h
->top_borders
[1][s
->mb_x
]+24)= *(uint64_t*)(src_cr
+17*uvlinesize
);
2888 static inline void xchg_pair_border(H264Context
*h
, uint8_t *src_y
, uint8_t *src_cb
, uint8_t *src_cr
, int linesize
, int uvlinesize
, int xchg
){
2889 MpegEncContext
* const s
= &h
->s
;
2892 int deblock_left
= (s
->mb_x
> 0);
2893 int deblock_top
= (s
->mb_y
> 0);
2895 tprintf("xchg_pair_border: src_y:%p src_cb:%p src_cr:%p ls:%d uvls:%d\n", src_y
, src_cb
, src_cr
, linesize
, uvlinesize
);
2897 src_y
-= 2 * linesize
+ 1;
2898 src_cb
-= 2 * uvlinesize
+ 1;
2899 src_cr
-= 2 * uvlinesize
+ 1;
2901 #define XCHG(a,b,t,xchg)\
2908 for(i
= (!deblock_top
)<<1; i
<34; i
++){
2909 XCHG(h
->left_border
[i
], src_y
[i
* linesize
], temp8
, xchg
);
2914 XCHG(*(uint64_t*)(h
->top_borders
[0][s
->mb_x
]+0), *(uint64_t*)(src_y
+1), temp64
, xchg
);
2915 XCHG(*(uint64_t*)(h
->top_borders
[0][s
->mb_x
]+8), *(uint64_t*)(src_y
+9), temp64
, 1);
2916 XCHG(*(uint64_t*)(h
->top_borders
[1][s
->mb_x
]+0), *(uint64_t*)(src_y
+1 +linesize
), temp64
, xchg
);
2917 XCHG(*(uint64_t*)(h
->top_borders
[1][s
->mb_x
]+8), *(uint64_t*)(src_y
+9 +linesize
), temp64
, 1);
2920 if(!(s
->flags
&CODEC_FLAG_GRAY
)){
2922 for(i
= (!deblock_top
) << 1; i
<18; i
++){
2923 XCHG(h
->left_border
[i
+34 ], src_cb
[i
*uvlinesize
], temp8
, xchg
);
2924 XCHG(h
->left_border
[i
+34+18], src_cr
[i
*uvlinesize
], temp8
, xchg
);
2928 XCHG(*(uint64_t*)(h
->top_borders
[0][s
->mb_x
]+16), *(uint64_t*)(src_cb
+1), temp64
, 1);
2929 XCHG(*(uint64_t*)(h
->top_borders
[0][s
->mb_x
]+24), *(uint64_t*)(src_cr
+1), temp64
, 1);
2930 XCHG(*(uint64_t*)(h
->top_borders
[1][s
->mb_x
]+16), *(uint64_t*)(src_cb
+1 +uvlinesize
), temp64
, 1);
2931 XCHG(*(uint64_t*)(h
->top_borders
[1][s
->mb_x
]+24), *(uint64_t*)(src_cr
+1 +uvlinesize
), temp64
, 1);