/*
 * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/**
 * H.264 / AVC / MPEG4 part10 codec.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */
#include "mpegvideo.h"

#define interlaced_dct interlaced_dct_is_a_bad_name
#define mb_intra mb_intra_isnt_initalized_see_mb_type

#define LUMA_DC_BLOCK_INDEX   25
#define CHROMA_DC_BLOCK_INDEX 26

#define CHROMA_DC_COEFF_TOKEN_VLC_BITS 8
#define COEFF_TOKEN_VLC_BITS           8
#define TOTAL_ZEROS_VLC_BITS           9
#define CHROMA_DC_TOTAL_ZEROS_VLC_BITS 3
#define RUN_VLC_BITS                   3
#define RUN7_VLC_BITS                  6

#define MAX_SPS_COUNT 32
#define MAX_PPS_COUNT 256

#define MAX_MMCO_COUNT 66
/**
 * Sequence parameter set
 */
typedef struct SPS{
    int log2_max_frame_num;            ///< log2_max_frame_num_minus4 + 4
    int poc_type;                      ///< pic_order_cnt_type
    int log2_max_poc_lsb;              ///< log2_max_pic_order_cnt_lsb_minus4
    int delta_pic_order_always_zero_flag;
    int offset_for_non_ref_pic;
    int offset_for_top_to_bottom_field;
    int poc_cycle_length;              ///< num_ref_frames_in_pic_order_cnt_cycle
    int ref_frame_count;               ///< num_ref_frames
    int gaps_in_frame_num_allowed_flag;
    int mb_width;                      ///< frame_width_in_mbs_minus1 + 1
    int mb_height;                     ///< frame_height_in_mbs_minus1 + 1
    int frame_mbs_only_flag;
    int mb_aff;                        ///< mb_adaptive_frame_field_flag
    int direct_8x8_inference_flag;
    int crop;                          ///< frame_cropping_flag
    int crop_left;                     ///< frame_cropping_rect_left_offset
    int crop_right;                    ///< frame_cropping_rect_right_offset
    int crop_top;                      ///< frame_cropping_rect_top_offset
    int crop_bottom;                   ///< frame_cropping_rect_bottom_offset
    int vui_parameters_present_flag;
    int timing_info_present_flag;
    uint32_t num_units_in_tick;
    int fixed_frame_rate_flag;
    short offset_for_ref_frame[256];   //FIXME dyn aloc?
}SPS;
/**
 * Picture parameter set
 */
typedef struct PPS{
    int cabac;                         ///< entropy_coding_mode_flag
    int pic_order_present;             ///< pic_order_present_flag
    int slice_group_count;             ///< num_slice_groups_minus1 + 1
    int mb_slice_group_map_type;
    int ref_count[2];                  ///< num_ref_idx_l0/1_active_minus1 + 1
    int weighted_pred;                 ///< weighted_pred_flag
    int weighted_bipred_idc;
    int init_qp;                       ///< pic_init_qp_minus26 + 26
    int init_qs;                       ///< pic_init_qs_minus26 + 26
    int chroma_qp_index_offset;
    int deblocking_filter_parameters_present; ///< deblocking_filter_parameters_present_flag
    int constrained_intra_pred;        ///< constrained_intra_pred_flag
    int redundant_pic_cnt_present;     ///< redundant_pic_cnt_present_flag
}PPS;
/**
 * Memory management control operation opcode.
 */
typedef enum MMCOOpcode{

/**
 * Memory management control operation.
 */

typedef struct H264Context{
#define NAL_IDR_SLICE          5
#define NAL_PICTURE_DELIMITER  9
#define NAL_FILTER_DATA       10
    uint8_t *rbsp_buffer;
    int rbsp_buffer_size;

    /**
     * Used to parse AVC variant of h264
     */
    int is_avc;           ///< this flag is != 0 if codec is avc1
    int got_avcC;         ///< flag used to parse avcC data only once
    int nal_length_size;  ///< Number of bytes used for nal length (1, 2 or 4)

    int prev_mb_skiped;   //FIXME remove (IMHO not used)
    int chroma_pred_mode;
    int intra16x16_pred_mode;

    int8_t intra4x4_pred_mode_cache[5*8];
    int8_t (*intra4x4_pred_mode)[8];
    void (*pred4x4  [9+3])(uint8_t *src, uint8_t *topright, int stride); //FIXME move to dsp?
    void (*pred8x8  [4+3])(uint8_t *src, int stride);
    void (*pred16x16[4+3])(uint8_t *src, int stride);
    unsigned int topleft_samples_available;
    unsigned int top_samples_available;
    unsigned int topright_samples_available;
    unsigned int left_samples_available;
    uint8_t (*top_border)[16+2*8];
    uint8_t left_border[17+2*9];

    /**
     * non zero coeff count cache.
     * is 64 if not available.
     */
    uint8_t non_zero_count_cache[6*8];
    uint8_t (*non_zero_count)[16];
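    /* The *_cache arrays above and below (non_zero_count_cache, mv_cache,
     * ref_cache, mvd_cache) hold per-macroblock data in a small scratch grid
     * that is 8 entries wide: the current MB's 4x4 blocks sit in the middle
     * and the extra top row / left column holds the already decoded neighbor
     * values, so the prediction code can reach neighbors with simple -1 / -8
     * index offsets via scan8[] (see fill_caches() below). */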
189 * Motion vector cache.
191 int16_t mv_cache
[2][5*8][2];
192 int8_t ref_cache
[2][5*8];
193 #define LIST_NOT_USED -1 //FIXME rename?
194 #define PART_NOT_AVAILABLE -2
197 * is 1 if the specific list MV&references are set to 0,0,-2.
199 int mv_cache_clean
[2];
201 int block_offset
[16+8];
202 int chroma_subblock_offset
[16]; //FIXME remove
204 uint16_t *mb2b_xy
; //FIXME are these 4 a good idea?
206 int b_stride
; //FIXME use s->b4_stride
    int unknown_svq3_flag;
    int next_slice_index;

    SPS sps_buffer[MAX_SPS_COUNT];
    SPS sps;                           ///< current sps

    PPS pps_buffer[MAX_PPS_COUNT];
    PPS pps;                           //FIXME move to Picture perhaps? (->no) do we need that?

    uint8_t *slice_table_base;
    uint8_t *slice_table;              ///< slice_table_base + mb_stride + 1
    int slice_type_fixed;

    //interlacing specific flags
    int mb_field_decoding_flag;
    int delta_poc_bottom;
    int prev_poc_msb;                  ///< poc_msb of the last reference pic for POC type 0
    int prev_poc_lsb;                  ///< poc_lsb of the last reference pic for POC type 0
    int frame_num_offset;              ///< for POC type 2
    int prev_frame_num_offset;         ///< for POC type 2
    int prev_frame_num;                ///< frame_num of the last pic for POC type 1/2

    /**
     * frame_num for frames or 2*frame_num for field pics.
     */
    /**
     * max_frame_num or 2*max_frame_num for field pics.
     */
    //Weighted pred stuff
    int luma_log2_weight_denom;
    int chroma_log2_weight_denom;
    int luma_weight[2][16];
    int luma_offset[2][16];
    int chroma_weight[2][16][2];
    int chroma_offset[2][16][2];

    int deblocking_filter;             ///< disable_deblocking_filter_idc with 1<->0
    int slice_alpha_c0_offset;
    int slice_beta_offset;

    int redundant_pic_count;

    int direct_spatial_mv_pred;

    /**
     * num_ref_idx_l0/1_active_minus1 + 1
     */
    int ref_count[2];                  // FIXME split for AFF
    Picture *short_ref[16];
    Picture *long_ref[16];
    Picture default_ref_list[2][32];
    Picture ref_list[2][32];           //FIXME size?
    Picture field_ref_list[2][32];     //FIXME size?

    /**
     * memory management control operations buffer.
     */
    MMCO mmco[MAX_MMCO_COUNT];

    int long_ref_count;                ///< number of actual long term references
    int short_ref_count;               ///< number of actual short term references

    GetBitContext intra_gb;
    GetBitContext inter_gb;
    GetBitContext *intra_gb_ptr;
    GetBitContext *inter_gb_ptr;
    DCTELEM mb[16*24] __align8;

    uint8_t cabac_state[399];

    /* 0x100 -> non null luma_dc, 0x80/0x40 -> non null chroma_dc (cb/cr), 0x?0 -> chroma_cbp(0,1,2), 0x0? luma_cbp */
    /* chroma_pred_mode for i4x4 or i16x16, else 0 */
    uint8_t *chroma_pred_mode_table;
    int last_qscale_diff;
    int16_t (*mvd_table[2])[2];
    int16_t mvd_cache[2][5*8][2];
}H264Context;
static VLC coeff_token_vlc[4];
static VLC chroma_dc_coeff_token_vlc;

static VLC total_zeros_vlc[15];
static VLC chroma_dc_total_zeros_vlc[3];

static VLC run_vlc[6];
static void svq3_luma_dc_dequant_idct_c(DCTELEM *block, int qp);
static void svq3_add_idct_c(uint8_t *dst, DCTELEM *block, int stride, int qp, int dc);
static void filter_mb(H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr);
static inline uint32_t pack16to32(int a, int b){
#ifdef WORDS_BIGENDIAN
    return (b&0xFFFF) + (a<<16);
#else
    return (a&0xFFFF) + (b<<16);
#endif
}
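/* pack16to32() packs the two 16-bit motion vector components into one
 * 32-bit word so a whole MV can be copied with a single 32-bit store,
 * e.g. *(uint32_t*)h->mv_cache[list][index] = pack16to32(mx, my);
 * the WORDS_BIGENDIAN switch keeps the packed layout identical to an
 * int16_t[2] array in memory. */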
/**
 * @param h height of the rectangle, should be a constant
 * @param w width of the rectangle, should be a constant
 * @param size the size of val (1 or 4), should be a constant
 */
static inline void fill_rectangle(void *vp, int w, int h, int stride, uint32_t val, int size){ //FIXME ensure this IS inlined
    uint8_t *p= (uint8_t*)vp;
    assert(size==1 || size==4);
    w      *= size;
    stride *= size;

    //FIXME check what gcc generates for 64 bit on x86 and possibly write a 32 bit version of it
    if(w==2 && h==2){
        *(uint16_t*)(p + 0)=
        *(uint16_t*)(p + stride)= size==4 ? val : val*0x0101;
    }else if(w==2 && h==4){
        *(uint16_t*)(p + 0*stride)=
        *(uint16_t*)(p + 1*stride)=
        *(uint16_t*)(p + 2*stride)=
        *(uint16_t*)(p + 3*stride)= size==4 ? val : val*0x0101;
    }else if(w==4 && h==1){
        *(uint32_t*)(p + 0*stride)= size==4 ? val : val*0x01010101;
    }else if(w==4 && h==2){
        *(uint32_t*)(p + 0*stride)=
        *(uint32_t*)(p + 1*stride)= size==4 ? val : val*0x01010101;
    }else if(w==4 && h==4){
        *(uint32_t*)(p + 0*stride)=
        *(uint32_t*)(p + 1*stride)=
        *(uint32_t*)(p + 2*stride)=
        *(uint32_t*)(p + 3*stride)= size==4 ? val : val*0x01010101;
    }else if(w==8 && h==1){
        *(uint32_t*)(p + 0)=
        *(uint32_t*)(p + 4)= size==4 ? val : val*0x01010101;
    }else if(w==8 && h==2){
        *(uint32_t*)(p + 0 + 0*stride)=
        *(uint32_t*)(p + 4 + 0*stride)=
        *(uint32_t*)(p + 0 + 1*stride)=
        *(uint32_t*)(p + 4 + 1*stride)= size==4 ? val : val*0x01010101;
    }else if(w==8 && h==4){
        *(uint64_t*)(p + 0*stride)=
        *(uint64_t*)(p + 1*stride)=
        *(uint64_t*)(p + 2*stride)=
        *(uint64_t*)(p + 3*stride)= size==4 ? val*0x0100000001ULL : val*0x0101010101010101ULL;
    }else if(w==16 && h==2){
        *(uint64_t*)(p + 0+0*stride)=
        *(uint64_t*)(p + 8+0*stride)=
        *(uint64_t*)(p + 0+1*stride)=
        *(uint64_t*)(p + 8+1*stride)= size==4 ? val*0x0100000001ULL : val*0x0101010101010101ULL;
    }else if(w==16 && h==4){
        *(uint64_t*)(p + 0+0*stride)=
        *(uint64_t*)(p + 8+0*stride)=
        *(uint64_t*)(p + 0+1*stride)=
        *(uint64_t*)(p + 8+1*stride)=
        *(uint64_t*)(p + 0+2*stride)=
        *(uint64_t*)(p + 8+2*stride)=
        *(uint64_t*)(p + 0+3*stride)=
        *(uint64_t*)(p + 8+3*stride)= size==4 ? val*0x0100000001ULL : val*0x0101010101010101ULL;
    }else
        assert(0);
}
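/* Note on the fill constants above: for size==1 the 8-bit val is replicated
 * into every byte of the word via val*0x0101... (val*0x0101010101010101ULL
 * for 64-bit stores), while for size==4 the 32-bit val is stored directly
 * (duplicated into both halves with val*0x0100000001ULL for 64-bit stores). */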
404 static inline void fill_caches(H264Context
*h
, int mb_type
){
405 MpegEncContext
* const s
= &h
->s
;
406 const int mb_xy
= s
->mb_x
+ s
->mb_y
*s
->mb_stride
;
407 int topleft_xy
, top_xy
, topright_xy
, left_xy
[2];
408 int topleft_type
, top_type
, topright_type
, left_type
[2];
//wow what a mess, why didn't they simplify the interlacing & intra stuff, I can't imagine that these complex rules are worth it
416 topleft_xy
= 0; /* avoid warning */
417 top_xy
= 0; /* avoid warning */
418 topright_xy
= 0; /* avoid warning */
420 topleft_xy
= mb_xy
-1 - s
->mb_stride
;
421 top_xy
= mb_xy
- s
->mb_stride
;
422 topright_xy
= mb_xy
+1 - s
->mb_stride
;
423 left_xy
[0] = mb_xy
-1;
424 left_xy
[1] = mb_xy
-1;
431 topleft_type
= h
->slice_table
[topleft_xy
] == h
->slice_num ? s
->current_picture
.mb_type
[topleft_xy
] : 0;
432 top_type
= h
->slice_table
[top_xy
] == h
->slice_num ? s
->current_picture
.mb_type
[top_xy
] : 0;
433 topright_type
= h
->slice_table
[topright_xy
] == h
->slice_num ? s
->current_picture
.mb_type
[topright_xy
]: 0;
434 left_type
[0] = h
->slice_table
[left_xy
[0] ] == h
->slice_num ? s
->current_picture
.mb_type
[left_xy
[0]] : 0;
435 left_type
[1] = h
->slice_table
[left_xy
[1] ] == h
->slice_num ? s
->current_picture
.mb_type
[left_xy
[1]] : 0;
437 if(IS_INTRA(mb_type
)){
438 h
->topleft_samples_available
=
439 h
->top_samples_available
=
440 h
->left_samples_available
= 0xFFFF;
441 h
->topright_samples_available
= 0xEEEA;
443 if(!IS_INTRA(top_type
) && (top_type
==0 || h
->pps
.constrained_intra_pred
)){
444 h
->topleft_samples_available
= 0xB3FF;
445 h
->top_samples_available
= 0x33FF;
446 h
->topright_samples_available
= 0x26EA;
449 if(!IS_INTRA(left_type
[i
]) && (left_type
[i
]==0 || h
->pps
.constrained_intra_pred
)){
450 h
->topleft_samples_available
&= 0xDF5F;
451 h
->left_samples_available
&= 0x5F5F;
455 if(!IS_INTRA(topleft_type
) && (topleft_type
==0 || h
->pps
.constrained_intra_pred
))
456 h
->topleft_samples_available
&= 0x7FFF;
458 if(!IS_INTRA(topright_type
) && (topright_type
==0 || h
->pps
.constrained_intra_pred
))
459 h
->topright_samples_available
&= 0xFBFF;
461 if(IS_INTRA4x4(mb_type
)){
462 if(IS_INTRA4x4(top_type
)){
463 h
->intra4x4_pred_mode_cache
[4+8*0]= h
->intra4x4_pred_mode
[top_xy
][4];
464 h
->intra4x4_pred_mode_cache
[5+8*0]= h
->intra4x4_pred_mode
[top_xy
][5];
465 h
->intra4x4_pred_mode_cache
[6+8*0]= h
->intra4x4_pred_mode
[top_xy
][6];
466 h
->intra4x4_pred_mode_cache
[7+8*0]= h
->intra4x4_pred_mode
[top_xy
][3];
469 if(IS_INTRA16x16(top_type
) || (IS_INTER(top_type
) && !h
->pps
.constrained_intra_pred
))
474 h
->intra4x4_pred_mode_cache
[4+8*0]=
475 h
->intra4x4_pred_mode_cache
[5+8*0]=
476 h
->intra4x4_pred_mode_cache
[6+8*0]=
477 h
->intra4x4_pred_mode_cache
[7+8*0]= pred
;
480 if(IS_INTRA4x4(left_type
[i
])){
481 h
->intra4x4_pred_mode_cache
[3+8*1 + 2*8*i
]= h
->intra4x4_pred_mode
[left_xy
[i
]][left_block
[0+2*i
]];
482 h
->intra4x4_pred_mode_cache
[3+8*2 + 2*8*i
]= h
->intra4x4_pred_mode
[left_xy
[i
]][left_block
[1+2*i
]];
485 if(IS_INTRA16x16(left_type
[i
]) || (IS_INTER(left_type
[i
]) && !h
->pps
.constrained_intra_pred
))
490 h
->intra4x4_pred_mode_cache
[3+8*1 + 2*8*i
]=
491 h
->intra4x4_pred_mode_cache
[3+8*2 + 2*8*i
]= pred
;
//FIXME constrained_intra_pred & partitioning & nnz (let's hope this is just a typo in the spec)
508 h
->non_zero_count_cache
[4+8*0]= h
->non_zero_count
[top_xy
][0];
509 h
->non_zero_count_cache
[5+8*0]= h
->non_zero_count
[top_xy
][1];
510 h
->non_zero_count_cache
[6+8*0]= h
->non_zero_count
[top_xy
][2];
511 h
->non_zero_count_cache
[7+8*0]= h
->non_zero_count
[top_xy
][3];
513 h
->non_zero_count_cache
[1+8*0]= h
->non_zero_count
[top_xy
][7];
514 h
->non_zero_count_cache
[2+8*0]= h
->non_zero_count
[top_xy
][8];
516 h
->non_zero_count_cache
[1+8*3]= h
->non_zero_count
[top_xy
][10];
517 h
->non_zero_count_cache
[2+8*3]= h
->non_zero_count
[top_xy
][11];
519 h
->top_cbp
= h
->cbp_table
[top_xy
];
521 h
->non_zero_count_cache
[4+8*0]=
522 h
->non_zero_count_cache
[5+8*0]=
523 h
->non_zero_count_cache
[6+8*0]=
524 h
->non_zero_count_cache
[7+8*0]=
526 h
->non_zero_count_cache
[1+8*0]=
527 h
->non_zero_count_cache
[2+8*0]=
529 h
->non_zero_count_cache
[1+8*3]=
530 h
->non_zero_count_cache
[2+8*3]= h
->pps
.cabac
&& !IS_INTRA(mb_type
) ?
0 : 64;
532 if(IS_INTRA(mb_type
)) h
->top_cbp
= 0x1C0;
537 h
->non_zero_count_cache
[3+8*1]= h
->non_zero_count
[left_xy
[0]][6];
538 h
->non_zero_count_cache
[3+8*2]= h
->non_zero_count
[left_xy
[0]][5];
539 h
->non_zero_count_cache
[0+8*1]= h
->non_zero_count
[left_xy
[0]][9]; //FIXME left_block
540 h
->non_zero_count_cache
[0+8*4]= h
->non_zero_count
[left_xy
[0]][12];
541 h
->left_cbp
= h
->cbp_table
[left_xy
[0]]; //FIXME interlacing
543 h
->non_zero_count_cache
[3+8*1]=
544 h
->non_zero_count_cache
[3+8*2]=
545 h
->non_zero_count_cache
[0+8*1]=
546 h
->non_zero_count_cache
[0+8*4]= h
->pps
.cabac
&& !IS_INTRA(mb_type
) ?
0 : 64;
548 if(IS_INTRA(mb_type
)) h
->left_cbp
= 0x1C0;//FIXME interlacing
553 h
->non_zero_count_cache
[3+8*3]= h
->non_zero_count
[left_xy
[1]][4];
554 h
->non_zero_count_cache
[3+8*4]= h
->non_zero_count
[left_xy
[1]][3];
555 h
->non_zero_count_cache
[0+8*2]= h
->non_zero_count
[left_xy
[1]][8];
556 h
->non_zero_count_cache
[0+8*5]= h
->non_zero_count
[left_xy
[1]][11];
558 h
->non_zero_count_cache
[3+8*3]=
559 h
->non_zero_count_cache
[3+8*4]=
560 h
->non_zero_count_cache
[0+8*2]=
561 h
->non_zero_count_cache
[0+8*5]= h
->pps
.cabac
&& !IS_INTRA(mb_type
) ?
0 : 64;
565 if(IS_INTER(mb_type
)){
567 for(list
=0; list
<2; list
++){
568 if((!IS_8X8(mb_type
)) && !USES_LIST(mb_type
, list
)){
569 /*if(!h->mv_cache_clean[list]){
570 memset(h->mv_cache [list], 0, 8*5*2*sizeof(int16_t)); //FIXME clean only input? clean at all?
571 memset(h->ref_cache[list], PART_NOT_AVAILABLE, 8*5*sizeof(int8_t));
572 h->mv_cache_clean[list]= 1;
574 continue; //FIXME direct mode ...
576 h
->mv_cache_clean
[list
]= 0;
578 if(IS_INTER(topleft_type
)){
579 const int b_xy
= h
->mb2b_xy
[topleft_xy
] + 3 + 3*h
->b_stride
;
580 const int b8_xy
= h
->mb2b8_xy
[topleft_xy
] + 1 + h
->b8_stride
;
581 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] - 1 - 1*8]= *(uint32_t*)s
->current_picture
.motion_val
[list
][b_xy
];
582 h
->ref_cache
[list
][scan8
[0] - 1 - 1*8]= s
->current_picture
.ref_index
[list
][b8_xy
];
584 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] - 1 - 1*8]= 0;
585 h
->ref_cache
[list
][scan8
[0] - 1 - 1*8]= topleft_type ? LIST_NOT_USED
: PART_NOT_AVAILABLE
;
588 if(IS_INTER(top_type
)){
589 const int b_xy
= h
->mb2b_xy
[top_xy
] + 3*h
->b_stride
;
590 const int b8_xy
= h
->mb2b8_xy
[top_xy
] + h
->b8_stride
;
591 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] + 0 - 1*8]= *(uint32_t*)s
->current_picture
.motion_val
[list
][b_xy
+ 0];
592 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] + 1 - 1*8]= *(uint32_t*)s
->current_picture
.motion_val
[list
][b_xy
+ 1];
593 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] + 2 - 1*8]= *(uint32_t*)s
->current_picture
.motion_val
[list
][b_xy
+ 2];
594 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] + 3 - 1*8]= *(uint32_t*)s
->current_picture
.motion_val
[list
][b_xy
+ 3];
595 h
->ref_cache
[list
][scan8
[0] + 0 - 1*8]=
596 h
->ref_cache
[list
][scan8
[0] + 1 - 1*8]= s
->current_picture
.ref_index
[list
][b8_xy
+ 0];
597 h
->ref_cache
[list
][scan8
[0] + 2 - 1*8]=
598 h
->ref_cache
[list
][scan8
[0] + 3 - 1*8]= s
->current_picture
.ref_index
[list
][b8_xy
+ 1];
600 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] + 0 - 1*8]=
601 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] + 1 - 1*8]=
602 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] + 2 - 1*8]=
603 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] + 3 - 1*8]= 0;
604 *(uint32_t*)&h
->ref_cache
[list
][scan8
[0] + 0 - 1*8]= ((top_type ? LIST_NOT_USED
: PART_NOT_AVAILABLE
)&0xFF)*0x01010101;
607 if(IS_INTER(topright_type
)){
608 const int b_xy
= h
->mb2b_xy
[topright_xy
] + 3*h
->b_stride
;
609 const int b8_xy
= h
->mb2b8_xy
[topright_xy
] + h
->b8_stride
;
610 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] + 4 - 1*8]= *(uint32_t*)s
->current_picture
.motion_val
[list
][b_xy
];
611 h
->ref_cache
[list
][scan8
[0] + 4 - 1*8]= s
->current_picture
.ref_index
[list
][b8_xy
];
613 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] + 4 - 1*8]= 0;
614 h
->ref_cache
[list
][scan8
[0] + 4 - 1*8]= topright_type ? LIST_NOT_USED
: PART_NOT_AVAILABLE
;
//FIXME unify cleanup or something
618 if(IS_INTER(left_type
[0])){
619 const int b_xy
= h
->mb2b_xy
[left_xy
[0]] + 3;
620 const int b8_xy
= h
->mb2b8_xy
[left_xy
[0]] + 1;
621 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] - 1 + 0*8]= *(uint32_t*)s
->current_picture
.motion_val
[list
][b_xy
+ h
->b_stride
*left_block
[0]];
622 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] - 1 + 1*8]= *(uint32_t*)s
->current_picture
.motion_val
[list
][b_xy
+ h
->b_stride
*left_block
[1]];
623 h
->ref_cache
[list
][scan8
[0] - 1 + 0*8]=
624 h
->ref_cache
[list
][scan8
[0] - 1 + 1*8]= s
->current_picture
.ref_index
[list
][b8_xy
+ h
->b8_stride
*(left_block
[0]>>1)];
626 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] - 1 + 0*8]=
627 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] - 1 + 1*8]= 0;
628 h
->ref_cache
[list
][scan8
[0] - 1 + 0*8]=
629 h
->ref_cache
[list
][scan8
[0] - 1 + 1*8]= left_type
[0] ? LIST_NOT_USED
: PART_NOT_AVAILABLE
;
632 if(IS_INTER(left_type
[1])){
633 const int b_xy
= h
->mb2b_xy
[left_xy
[1]] + 3;
634 const int b8_xy
= h
->mb2b8_xy
[left_xy
[1]] + 1;
635 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] - 1 + 2*8]= *(uint32_t*)s
->current_picture
.motion_val
[list
][b_xy
+ h
->b_stride
*left_block
[2]];
636 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] - 1 + 3*8]= *(uint32_t*)s
->current_picture
.motion_val
[list
][b_xy
+ h
->b_stride
*left_block
[3]];
637 h
->ref_cache
[list
][scan8
[0] - 1 + 2*8]=
638 h
->ref_cache
[list
][scan8
[0] - 1 + 3*8]= s
->current_picture
.ref_index
[list
][b8_xy
+ h
->b8_stride
*(left_block
[2]>>1)];
640 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] - 1 + 2*8]=
641 *(uint32_t*)h
->mv_cache
[list
][scan8
[0] - 1 + 3*8]= 0;
642 h
->ref_cache
[list
][scan8
[0] - 1 + 2*8]=
643 h
->ref_cache
[list
][scan8
[0] - 1 + 3*8]= left_type
[0] ? LIST_NOT_USED
: PART_NOT_AVAILABLE
;
646 h
->ref_cache
[list
][scan8
[5 ]+1] =
647 h
->ref_cache
[list
][scan8
[7 ]+1] =
648 h
->ref_cache
[list
][scan8
[13]+1] = //FIXME remove past 3 (init somewher else)
649 h
->ref_cache
[list
][scan8
[4 ]] =
650 h
->ref_cache
[list
][scan8
[12]] = PART_NOT_AVAILABLE
;
651 *(uint32_t*)h
->mv_cache
[list
][scan8
[5 ]+1]=
652 *(uint32_t*)h
->mv_cache
[list
][scan8
[7 ]+1]=
653 *(uint32_t*)h
->mv_cache
[list
][scan8
[13]+1]= //FIXME remove past 3 (init somewher else)
654 *(uint32_t*)h
->mv_cache
[list
][scan8
[4 ]]=
655 *(uint32_t*)h
->mv_cache
[list
][scan8
[12]]= 0;
/* XXX yuck, load mvd */
659 if(IS_INTER(topleft_type
)){
660 const int b_xy
= h
->mb2b_xy
[topleft_xy
] + 3 + 3*h
->b_stride
;
661 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] - 1 - 1*8]= *(uint32_t*)h
->mvd_table
[list
][b_xy
];
663 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] - 1 - 1*8]= 0;
666 if(IS_INTER(top_type
)){
667 const int b_xy
= h
->mb2b_xy
[top_xy
] + 3*h
->b_stride
;
668 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] + 0 - 1*8]= *(uint32_t*)h
->mvd_table
[list
][b_xy
+ 0];
669 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] + 1 - 1*8]= *(uint32_t*)h
->mvd_table
[list
][b_xy
+ 1];
670 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] + 2 - 1*8]= *(uint32_t*)h
->mvd_table
[list
][b_xy
+ 2];
671 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] + 3 - 1*8]= *(uint32_t*)h
->mvd_table
[list
][b_xy
+ 3];
673 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] + 0 - 1*8]=
674 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] + 1 - 1*8]=
675 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] + 2 - 1*8]=
676 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] + 3 - 1*8]= 0;
678 if(IS_INTER(left_type
[0])){
679 const int b_xy
= h
->mb2b_xy
[left_xy
[0]] + 3;
680 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] - 1 + 0*8]= *(uint32_t*)h
->mvd_table
[list
][b_xy
+ h
->b_stride
*left_block
[0]];
681 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] - 1 + 1*8]= *(uint32_t*)h
->mvd_table
[list
][b_xy
+ h
->b_stride
*left_block
[1]];
683 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] - 1 + 0*8]=
684 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] - 1 + 1*8]= 0;
686 if(IS_INTER(left_type
[1])){
687 const int b_xy
= h
->mb2b_xy
[left_xy
[1]] + 3;
688 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] - 1 + 2*8]= *(uint32_t*)h
->mvd_table
[list
][b_xy
+ h
->b_stride
*left_block
[2]];
689 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] - 1 + 3*8]= *(uint32_t*)h
->mvd_table
[list
][b_xy
+ h
->b_stride
*left_block
[3]];
691 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] - 1 + 2*8]=
692 *(uint32_t*)h
->mvd_cache
[list
][scan8
[0] - 1 + 3*8]= 0;
694 *(uint32_t*)h
->mvd_cache
[list
][scan8
[5 ]+1]=
695 *(uint32_t*)h
->mvd_cache
[list
][scan8
[7 ]+1]=
696 *(uint32_t*)h
->mvd_cache
[list
][scan8
[13]+1]= //FIXME remove past 3 (init somewher else)
697 *(uint32_t*)h
->mvd_cache
[list
][scan8
[4 ]]=
698 *(uint32_t*)h
->mvd_cache
[list
][scan8
[12]]= 0;
706 static inline void write_back_intra_pred_mode(H264Context
*h
){
707 MpegEncContext
* const s
= &h
->s
;
708 const int mb_xy
= s
->mb_x
+ s
->mb_y
*s
->mb_stride
;
710 h
->intra4x4_pred_mode
[mb_xy
][0]= h
->intra4x4_pred_mode_cache
[7+8*1];
711 h
->intra4x4_pred_mode
[mb_xy
][1]= h
->intra4x4_pred_mode_cache
[7+8*2];
712 h
->intra4x4_pred_mode
[mb_xy
][2]= h
->intra4x4_pred_mode_cache
[7+8*3];
713 h
->intra4x4_pred_mode
[mb_xy
][3]= h
->intra4x4_pred_mode_cache
[7+8*4];
714 h
->intra4x4_pred_mode
[mb_xy
][4]= h
->intra4x4_pred_mode_cache
[4+8*4];
715 h
->intra4x4_pred_mode
[mb_xy
][5]= h
->intra4x4_pred_mode_cache
[5+8*4];
716 h
->intra4x4_pred_mode
[mb_xy
][6]= h
->intra4x4_pred_mode_cache
[6+8*4];
720 * checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks.
722 static inline int check_intra4x4_pred_mode(H264Context
*h
){
723 MpegEncContext
* const s
= &h
->s
;
724 static const int8_t top
[12]= {-1, 0,LEFT_DC_PRED
,-1,-1,-1,-1,-1, 0};
725 static const int8_t left
[12]= { 0,-1, TOP_DC_PRED
, 0,-1,-1,-1, 0,-1,DC_128_PRED
};
728 if(!(h
->top_samples_available
&0x8000)){
730 int status
= top
[ h
->intra4x4_pred_mode_cache
[scan8
[0] + i
] ];
732 av_log(h
->s
.avctx
, AV_LOG_ERROR
, "top block unavailable for requested intra4x4 mode %d at %d %d\n", status
, s
->mb_x
, s
->mb_y
);
735 h
->intra4x4_pred_mode_cache
[scan8
[0] + i
]= status
;
740 if(!(h
->left_samples_available
&0x8000)){
742 int status
= left
[ h
->intra4x4_pred_mode_cache
[scan8
[0] + 8*i
] ];
744 av_log(h
->s
.avctx
, AV_LOG_ERROR
, "left block unavailable for requested intra4x4 mode %d at %d %d\n", status
, s
->mb_x
, s
->mb_y
);
747 h
->intra4x4_pred_mode_cache
[scan8
[0] + 8*i
]= status
;
753 } //FIXME cleanup like next
756 * checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks.
758 static inline int check_intra_pred_mode(H264Context
*h
, int mode
){
759 MpegEncContext
* const s
= &h
->s
;
760 static const int8_t top
[7]= {LEFT_DC_PRED8x8
, 1,-1,-1};
761 static const int8_t left
[7]= { TOP_DC_PRED8x8
,-1, 2,-1,DC_128_PRED8x8
};
763 if(mode
< 0 || mode
> 6) {
764 av_log(h
->s
.avctx
, AV_LOG_ERROR
, "out of range intra chroma pred mode at %d %d\n", s
->mb_x
, s
->mb_y
);
768 if(!(h
->top_samples_available
&0x8000)){
771 av_log(h
->s
.avctx
, AV_LOG_ERROR
, "top block unavailable for requested intra mode at %d %d\n", s
->mb_x
, s
->mb_y
);
776 if(!(h
->left_samples_available
&0x8000)){
779 av_log(h
->s
.avctx
, AV_LOG_ERROR
, "left block unavailable for requested intra mode at %d %d\n", s
->mb_x
, s
->mb_y
);
788 * gets the predicted intra4x4 prediction mode.
790 static inline int pred_intra_mode(H264Context
*h
, int n
){
791 const int index8
= scan8
[n
];
792 const int left
= h
->intra4x4_pred_mode_cache
[index8
- 1];
793 const int top
= h
->intra4x4_pred_mode_cache
[index8
- 8];
794 const int min
= FFMIN(left
, top
);
796 tprintf("mode:%d %d min:%d\n", left
,top
, min
);
798 if(min
<0) return DC_PRED
;
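/* Per the H.264 spec, the predicted intra4x4 mode is the minimum of the
 * left and top neighbors' modes; if either neighbor is unavailable
 * (stored as a negative value in the cache) the prediction falls back
 * to DC_PRED. */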
802 static inline void write_back_non_zero_count(H264Context
*h
){
803 MpegEncContext
* const s
= &h
->s
;
804 const int mb_xy
= s
->mb_x
+ s
->mb_y
*s
->mb_stride
;
806 h
->non_zero_count
[mb_xy
][0]= h
->non_zero_count_cache
[4+8*4];
807 h
->non_zero_count
[mb_xy
][1]= h
->non_zero_count_cache
[5+8*4];
808 h
->non_zero_count
[mb_xy
][2]= h
->non_zero_count_cache
[6+8*4];
809 h
->non_zero_count
[mb_xy
][3]= h
->non_zero_count_cache
[7+8*4];
810 h
->non_zero_count
[mb_xy
][4]= h
->non_zero_count_cache
[7+8*3];
811 h
->non_zero_count
[mb_xy
][5]= h
->non_zero_count_cache
[7+8*2];
812 h
->non_zero_count
[mb_xy
][6]= h
->non_zero_count_cache
[7+8*1];
814 h
->non_zero_count
[mb_xy
][7]= h
->non_zero_count_cache
[1+8*2];
815 h
->non_zero_count
[mb_xy
][8]= h
->non_zero_count_cache
[2+8*2];
816 h
->non_zero_count
[mb_xy
][9]= h
->non_zero_count_cache
[2+8*1];
818 h
->non_zero_count
[mb_xy
][10]=h
->non_zero_count_cache
[1+8*5];
819 h
->non_zero_count
[mb_xy
][11]=h
->non_zero_count_cache
[2+8*5];
820 h
->non_zero_count
[mb_xy
][12]=h
->non_zero_count_cache
[2+8*4];
824 * gets the predicted number of non zero coefficients.
825 * @param n block index
827 static inline int pred_non_zero_count(H264Context
*h
, int n
){
828 const int index8
= scan8
[n
];
829 const int left
= h
->non_zero_count_cache
[index8
- 1];
830 const int top
= h
->non_zero_count_cache
[index8
- 8];
833 if(i
<64) i
= (i
+1)>>1;
835 tprintf("pred_nnz L%X T%X n%d s%d P%X\n", left
, top
, n
, scan8
[n
], i
&31);
840 static inline int fetch_diagonal_mv(H264Context
*h
, const int16_t **C
, int i
, int list
, int part_width
){
841 const int topright_ref
= h
->ref_cache
[list
][ i
- 8 + part_width
];
843 if(topright_ref
!= PART_NOT_AVAILABLE
){
844 *C
= h
->mv_cache
[list
][ i
- 8 + part_width
];
847 tprintf("topright MV not available\n");
849 *C
= h
->mv_cache
[list
][ i
- 8 - 1 ];
850 return h
->ref_cache
[list
][ i
- 8 - 1 ];
855 * gets the predicted MV.
856 * @param n the block index
857 * @param part_width the width of the partition (4, 8,16) -> (1, 2, 4)
858 * @param mx the x component of the predicted motion vector
859 * @param my the y component of the predicted motion vector
861 static inline void pred_motion(H264Context
* const h
, int n
, int part_width
, int list
, int ref
, int * const mx
, int * const my
){
862 const int index8
= scan8
[n
];
863 const int top_ref
= h
->ref_cache
[list
][ index8
- 8 ];
864 const int left_ref
= h
->ref_cache
[list
][ index8
- 1 ];
865 const int16_t * const A
= h
->mv_cache
[list
][ index8
- 1 ];
866 const int16_t * const B
= h
->mv_cache
[list
][ index8
- 8 ];
868 int diagonal_ref
, match_count
;
870 assert(part_width
==1 || part_width
==2 || part_width
==4);
880 diagonal_ref
= fetch_diagonal_mv(h
, &C
, index8
, list
, part_width
);
881 match_count
= (diagonal_ref
==ref
) + (top_ref
==ref
) + (left_ref
==ref
);
882 if(match_count
> 1){ //most common
883 *mx
= mid_pred(A
[0], B
[0], C
[0]);
884 *my
= mid_pred(A
[1], B
[1], C
[1]);
885 }else if(match_count
==1){
889 }else if(top_ref
==ref
){
897 if(top_ref
== PART_NOT_AVAILABLE
&& diagonal_ref
== PART_NOT_AVAILABLE
&& left_ref
!= PART_NOT_AVAILABLE
){
901 *mx
= mid_pred(A
[0], B
[0], C
[0]);
902 *my
= mid_pred(A
[1], B
[1], C
[1]);
906 tprintf("pred_motion (%2d %2d %2d) (%2d %2d %2d) (%2d %2d %2d) -> (%2d %2d %2d) at %2d %2d %d list %d\n", top_ref
, B
[0], B
[1], diagonal_ref
, C
[0], C
[1], left_ref
, A
[0], A
[1], ref
, *mx
, *my
, h
->s
.mb_x
, h
->s
.mb_y
, n
, list
);
910 * gets the directionally predicted 16x8 MV.
911 * @param n the block index
912 * @param mx the x component of the predicted motion vector
913 * @param my the y component of the predicted motion vector
915 static inline void pred_16x8_motion(H264Context
* const h
, int n
, int list
, int ref
, int * const mx
, int * const my
){
917 const int top_ref
= h
->ref_cache
[list
][ scan8
[0] - 8 ];
918 const int16_t * const B
= h
->mv_cache
[list
][ scan8
[0] - 8 ];
920 tprintf("pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d", top_ref
, B
[0], B
[1], h
->s
.mb_x
, h
->s
.mb_y
, n
, list
);
928 const int left_ref
= h
->ref_cache
[list
][ scan8
[8] - 1 ];
929 const int16_t * const A
= h
->mv_cache
[list
][ scan8
[8] - 1 ];
931 tprintf("pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d", left_ref
, A
[0], A
[1], h
->s
.mb_x
, h
->s
.mb_y
, n
, list
);
941 pred_motion(h
, n
, 4, list
, ref
, mx
, my
);
945 * gets the directionally predicted 8x16 MV.
946 * @param n the block index
947 * @param mx the x component of the predicted motion vector
948 * @param my the y component of the predicted motion vector
950 static inline void pred_8x16_motion(H264Context
* const h
, int n
, int list
, int ref
, int * const mx
, int * const my
){
952 const int left_ref
= h
->ref_cache
[list
][ scan8
[0] - 1 ];
953 const int16_t * const A
= h
->mv_cache
[list
][ scan8
[0] - 1 ];
955 tprintf("pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d", left_ref
, A
[0], A
[1], h
->s
.mb_x
, h
->s
.mb_y
, n
, list
);
966 diagonal_ref
= fetch_diagonal_mv(h
, &C
, scan8
[4], list
, 2);
968 tprintf("pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d", diagonal_ref
, C
[0], C
[1], h
->s
.mb_x
, h
->s
.mb_y
, n
, list
);
970 if(diagonal_ref
== ref
){
978 pred_motion(h
, n
, 2, list
, ref
, mx
, my
);
static inline void pred_pskip_motion(H264Context * const h, int * const mx, int * const my){
    const int top_ref = h->ref_cache[0][ scan8[0] - 8 ];
    const int left_ref= h->ref_cache[0][ scan8[0] - 1 ];

    tprintf("pred_pskip: (%d) (%d) at %2d %2d\n", top_ref, left_ref, h->s.mb_x, h->s.mb_y);

    if(top_ref == PART_NOT_AVAILABLE || left_ref == PART_NOT_AVAILABLE
       || (top_ref == 0 && *(uint32_t*)h->mv_cache[0][ scan8[0] - 8 ] == 0)
       || (left_ref== 0 && *(uint32_t*)h->mv_cache[0][ scan8[0] - 1 ] == 0)){

        *mx = *my = 0;
        return;
    }

    pred_motion(h, 0, 4, 0, 0, mx, my);
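/* P-Skip MV prediction: the skip MV is forced to (0,0) when the top or left
 * neighbor is unavailable, or when either of them uses reference 0 with a
 * zero motion vector; otherwise the normal median prediction (pred_motion
 * with a 16x16 partition and ref 0) is used. */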
1000 static inline void write_back_motion(H264Context
*h
, int mb_type
){
1001 MpegEncContext
* const s
= &h
->s
;
1002 const int b_xy
= 4*s
->mb_x
+ 4*s
->mb_y
*h
->b_stride
;
1003 const int b8_xy
= 2*s
->mb_x
+ 2*s
->mb_y
*h
->b8_stride
;
1006 for(list
=0; list
<2; list
++){
1008 if((!IS_8X8(mb_type
)) && !USES_LIST(mb_type
, list
)){
if(1){ //FIXME skip or never read if mb_type doesn't use it
1011 *(uint64_t*)s
->current_picture
.motion_val
[list
][b_xy
+ 0 + y
*h
->b_stride
]=
1012 *(uint64_t*)s
->current_picture
.motion_val
[list
][b_xy
+ 2 + y
*h
->b_stride
]= 0;
1014 if( h
->pps
.cabac
) {
1015 /* FIXME needed ? */
1017 *(uint64_t*)h
->mvd_table
[list
][b_xy
+ 0 + y
*h
->b_stride
]=
1018 *(uint64_t*)h
->mvd_table
[list
][b_xy
+ 2 + y
*h
->b_stride
]= 0;
1022 *(uint16_t*)&s
->current_picture
.ref_index
[list
][b8_xy
+ y
*h
->b8_stride
]= (LIST_NOT_USED
&0xFF)*0x0101;
1025 continue; //FIXME direct mode ...
1029 *(uint64_t*)s
->current_picture
.motion_val
[list
][b_xy
+ 0 + y
*h
->b_stride
]= *(uint64_t*)h
->mv_cache
[list
][scan8
[0]+0 + 8*y
];
1030 *(uint64_t*)s
->current_picture
.motion_val
[list
][b_xy
+ 2 + y
*h
->b_stride
]= *(uint64_t*)h
->mv_cache
[list
][scan8
[0]+2 + 8*y
];
1032 if( h
->pps
.cabac
) {
1034 *(uint64_t*)h
->mvd_table
[list
][b_xy
+ 0 + y
*h
->b_stride
]= *(uint64_t*)h
->mvd_cache
[list
][scan8
[0]+0 + 8*y
];
1035 *(uint64_t*)h
->mvd_table
[list
][b_xy
+ 2 + y
*h
->b_stride
]= *(uint64_t*)h
->mvd_cache
[list
][scan8
[0]+2 + 8*y
];
1039 s
->current_picture
.ref_index
[list
][b8_xy
+ 0 + y
*h
->b8_stride
]= h
->ref_cache
[list
][scan8
[0]+0 + 16*y
];
1040 s
->current_picture
.ref_index
[list
][b8_xy
+ 1 + y
*h
->b8_stride
]= h
->ref_cache
[list
][scan8
[0]+2 + 16*y
];
/**
 * Decodes a network abstraction layer unit.
 * @param consumed is the number of bytes used as input
 * @param length is the length of the array
 * @param dst_length is the number of decoded bytes FIXME here or a decode rbsp trailing?
 * @returns decoded bytes, might be src+1 if no escapes
 */
1052 static uint8_t *decode_nal(H264Context
*h
, uint8_t *src
, int *dst_length
, int *consumed
, int length
){
1056 // src[0]&0x80; //forbidden bit
1057 h
->nal_ref_idc
= src
[0]>>5;
1058 h
->nal_unit_type
= src
[0]&0x1F;
1062 for(i
=0; i
<length
; i
++)
1063 printf("%2X ", src
[i
]);
1065 for(i
=0; i
+1<length
; i
+=2){
1066 if(src
[i
]) continue;
1067 if(i
>0 && src
[i
-1]==0) i
--;
1068 if(i
+2<length
&& src
[i
+1]==0 && src
[i
+2]<=3){
1070 /* startcode, so we must be past the end */
1077 if(i
>=length
-1){ //no escaped 0
1078 *dst_length
= length
;
1079 *consumed
= length
+1; //+1 for the header
1083 h
->rbsp_buffer
= av_fast_realloc(h
->rbsp_buffer
, &h
->rbsp_buffer_size
, length
);
1084 dst
= h
->rbsp_buffer
;
//printf("decoding escapes\n");
1089 //remove escapes (very rare 1:2^22)
1090 if(si
+2<length
&& src
[si
]==0 && src
[si
+1]==0 && src
[si
+2]<=3){
1091 if(src
[si
+2]==3){ //escape
1096 }else //next start code
1100 dst
[di
++]= src
[si
++];
1104 *consumed
= si
+ 1;//+1 for the header
//FIXME store exact number of bits in the getbitcontext (it's needed for decoding)
1111 * @param src the data which should be escaped
1112 * @param dst the target buffer, dst+1 == src is allowed as a special case
1113 * @param length the length of the src data
1114 * @param dst_length the length of the dst array
 * @returns length of escaped data in bytes or -1 if an error occurred
1117 static int encode_nal(H264Context
*h
, uint8_t *dst
, uint8_t *src
, int length
, int dst_length
){
1118 int i
, escape_count
, si
, di
;
1122 assert(dst_length
>0);
1124 dst
[0]= (h
->nal_ref_idc
<<5) + h
->nal_unit_type
;
1126 if(length
==0) return 1;
1129 for(i
=0; i
<length
; i
+=2){
1130 if(src
[i
]) continue;
1131 if(i
>0 && src
[i
-1]==0)
1133 if(i
+2<length
&& src
[i
+1]==0 && src
[i
+2]<=3){
1139 if(escape_count
==0){
1141 memcpy(dst
+1, src
, length
);
1145 if(length
+ escape_count
+ 1> dst_length
)
1148 //this should be damn rare (hopefully)
1150 h
->rbsp_buffer
= av_fast_realloc(h
->rbsp_buffer
, &h
->rbsp_buffer_size
, length
+ escape_count
);
1151 temp
= h
->rbsp_buffer
;
1152 //printf("encoding esc\n");
1157 if(si
+2<length
&& src
[si
]==0 && src
[si
+1]==0 && src
[si
+2]<=3){
1158 temp
[di
++]= 0; si
++;
1159 temp
[di
++]= 0; si
++;
1161 temp
[di
++]= src
[si
++];
1164 temp
[di
++]= src
[si
++];
1166 memcpy(dst
+1, temp
, length
+escape_count
);
1168 assert(di
== length
+escape_count
);
 * writes 1,10,100,1000,... for alignment; yes, it's exactly the inverse of MPEG-4
1176 static void encode_rbsp_trailing(PutBitContext
*pb
){
1179 length
= (-put_bits_count(pb
))&7;
1180 if(length
) put_bits(pb
, length
, 0);
1185 * identifies the exact end of the bitstream
1186 * @return the length of the trailing, or 0 if damaged
1188 static int decode_rbsp_trailing(uint8_t *src
){
1192 tprintf("rbsp trailing %X\n", v
);
 * IDCT transforms the 16 DC values and dequantizes them.
1203 * @param qp quantization parameter
1205 static void h264_luma_dc_dequant_idct_c(DCTELEM
*block
, int qp
){
1206 const int qmul
= dequant_coeff
[qp
][0];
1209 int temp
[16]; //FIXME check if this is a good idea
1210 static const int x_offset
[4]={0, 1*stride
, 4* stride
, 5*stride
};
1211 static const int y_offset
[4]={0, 2*stride
, 8* stride
, 10*stride
};
1213 //memset(block, 64, 2*256);
1216 const int offset
= y_offset
[i
];
1217 const int z0
= block
[offset
+stride
*0] + block
[offset
+stride
*4];
1218 const int z1
= block
[offset
+stride
*0] - block
[offset
+stride
*4];
1219 const int z2
= block
[offset
+stride
*1] - block
[offset
+stride
*5];
1220 const int z3
= block
[offset
+stride
*1] + block
[offset
+stride
*5];
1229 const int offset
= x_offset
[i
];
1230 const int z0
= temp
[4*0+i
] + temp
[4*2+i
];
1231 const int z1
= temp
[4*0+i
] - temp
[4*2+i
];
1232 const int z2
= temp
[4*1+i
] - temp
[4*3+i
];
1233 const int z3
= temp
[4*1+i
] + temp
[4*3+i
];
1235 block
[stride
*0 +offset
]= ((z0
+ z3
)*qmul
+ 2)>>2; //FIXME think about merging this into decode_residual
1236 block
[stride
*2 +offset
]= ((z1
+ z2
)*qmul
+ 2)>>2;
1237 block
[stride
*8 +offset
]= ((z1
- z2
)*qmul
+ 2)>>2;
1238 block
[stride
*10+offset
]= ((z0
- z3
)*qmul
+ 2)>>2;
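/* The 16 luma DC coefficients of an Intra16x16 macroblock form a 4x4 block
 * that gets this separate Hadamard-style transform; the results are written
 * back to the DC position of each of the 16 4x4 residual blocks (the
 * stride*{0,2,8,10} + x_offset addressing above) before the per-block IDCT. */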
 * DCT transforms the 16 DC values.
1245 * @param qp quantization parameter ??? FIXME
1247 static void h264_luma_dc_dct_c(DCTELEM
*block
/*, int qp*/){
1248 // const int qmul= dequant_coeff[qp][0];
1250 int temp
[16]; //FIXME check if this is a good idea
1251 static const int x_offset
[4]={0, 1*stride
, 4* stride
, 5*stride
};
1252 static const int y_offset
[4]={0, 2*stride
, 8* stride
, 10*stride
};
1255 const int offset
= y_offset
[i
];
1256 const int z0
= block
[offset
+stride
*0] + block
[offset
+stride
*4];
1257 const int z1
= block
[offset
+stride
*0] - block
[offset
+stride
*4];
1258 const int z2
= block
[offset
+stride
*1] - block
[offset
+stride
*5];
1259 const int z3
= block
[offset
+stride
*1] + block
[offset
+stride
*5];
1268 const int offset
= x_offset
[i
];
1269 const int z0
= temp
[4*0+i
] + temp
[4*2+i
];
1270 const int z1
= temp
[4*0+i
] - temp
[4*2+i
];
1271 const int z2
= temp
[4*1+i
] - temp
[4*3+i
];
1272 const int z3
= temp
[4*1+i
] + temp
[4*3+i
];
1274 block
[stride
*0 +offset
]= (z0
+ z3
)>>1;
1275 block
[stride
*2 +offset
]= (z1
+ z2
)>>1;
1276 block
[stride
*8 +offset
]= (z1
- z2
)>>1;
1277 block
[stride
*10+offset
]= (z0
- z3
)>>1;
1285 static void chroma_dc_dequant_idct_c(DCTELEM
*block
, int qp
){
1286 const int qmul
= dequant_coeff
[qp
][0];
1287 const int stride
= 16*2;
1288 const int xStride
= 16;
1291 a
= block
[stride
*0 + xStride
*0];
1292 b
= block
[stride
*0 + xStride
*1];
1293 c
= block
[stride
*1 + xStride
*0];
1294 d
= block
[stride
*1 + xStride
*1];
1301 block
[stride
*0 + xStride
*0]= ((a
+c
)*qmul
+ 0)>>1;
1302 block
[stride
*0 + xStride
*1]= ((e
+b
)*qmul
+ 0)>>1;
1303 block
[stride
*1 + xStride
*0]= ((a
-c
)*qmul
+ 0)>>1;
1304 block
[stride
*1 + xStride
*1]= ((e
-b
)*qmul
+ 0)>>1;
1308 static void chroma_dc_dct_c(DCTELEM
*block
){
1309 const int stride
= 16*2;
1310 const int xStride
= 16;
1313 a
= block
[stride
*0 + xStride
*0];
1314 b
= block
[stride
*0 + xStride
*1];
1315 c
= block
[stride
*1 + xStride
*0];
1316 d
= block
[stride
*1 + xStride
*1];
1323 block
[stride
*0 + xStride
*0]= (a
+c
);
1324 block
[stride
*0 + xStride
*1]= (e
+b
);
1325 block
[stride
*1 + xStride
*0]= (a
-c
);
1326 block
[stride
*1 + xStride
*1]= (e
-b
);
1331 * gets the chroma qp.
1333 static inline int get_chroma_qp(H264Context
*h
, int qscale
){
1335 return chroma_qp
[clip(qscale
+ h
->pps
.chroma_qp_index_offset
, 0, 51)];
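/* The chroma QP is not simply luma QP plus an offset: after adding
 * pps.chroma_qp_index_offset and clipping to [0,51], the result is mapped
 * through the spec's chroma_qp[] table, which flattens out for high QP
 * values so chroma is quantized less aggressively than luma there. */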
1340 static void h264_diff_dct_c(DCTELEM
*block
, uint8_t *src1
, uint8_t *src2
, int stride
){
1342 //FIXME try int temp instead of block
1345 const int d0
= src1
[0 + i
*stride
] - src2
[0 + i
*stride
];
1346 const int d1
= src1
[1 + i
*stride
] - src2
[1 + i
*stride
];
1347 const int d2
= src1
[2 + i
*stride
] - src2
[2 + i
*stride
];
1348 const int d3
= src1
[3 + i
*stride
] - src2
[3 + i
*stride
];
1349 const int z0
= d0
+ d3
;
1350 const int z3
= d0
- d3
;
1351 const int z1
= d1
+ d2
;
1352 const int z2
= d1
- d2
;
1354 block
[0 + 4*i
]= z0
+ z1
;
1355 block
[1 + 4*i
]= 2*z3
+ z2
;
1356 block
[2 + 4*i
]= z0
- z1
;
1357 block
[3 + 4*i
]= z3
- 2*z2
;
1361 const int z0
= block
[0*4 + i
] + block
[3*4 + i
];
1362 const int z3
= block
[0*4 + i
] - block
[3*4 + i
];
1363 const int z1
= block
[1*4 + i
] + block
[2*4 + i
];
1364 const int z2
= block
[1*4 + i
] - block
[2*4 + i
];
1366 block
[0*4 + i
]= z0
+ z1
;
1367 block
[1*4 + i
]= 2*z3
+ z2
;
1368 block
[2*4 + i
]= z0
- z1
;
1369 block
[3*4 + i
]= z3
- 2*z2
;
//FIXME need to check that this doesn't overflow signed 32 bit for low qp, I am not sure, it's very close
//FIXME check that gcc inlines this (and optimizes intra & seperate_dc stuff away)
1376 static inline int quantize_c(DCTELEM
*block
, uint8_t *scantable
, int qscale
, int intra
, int seperate_dc
){
1378 const int * const quant_table
= quant_coeff
[qscale
];
1379 const int bias
= intra ?
(1<<QUANT_SHIFT
)/3 : (1<<QUANT_SHIFT
)/6;
1380 const unsigned int threshold1
= (1<<QUANT_SHIFT
) - bias
- 1;
1381 const unsigned int threshold2
= (threshold1
<<1);
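/* Dead-zone quantizer trick: with bias = (1<<QUANT_SHIFT)/3 for intra (1/6
 * for inter), threshold1 = (1<<QUANT_SHIFT) - bias - 1 and
 * threshold2 = 2*threshold1, the single unsigned compare
 *     (unsigned)(level + threshold1) > threshold2
 * is true exactly when |level| is large enough to quantize to a nonzero
 * value, so zero coefficients are skipped with one branch. */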
1387 const int dc_bias
= intra ?
(1<<(QUANT_SHIFT
-2))/3 : (1<<(QUANT_SHIFT
-2))/6;
1388 const unsigned int dc_threshold1
= (1<<(QUANT_SHIFT
-2)) - dc_bias
- 1;
1389 const unsigned int dc_threshold2
= (dc_threshold1
<<1);
1391 int level
= block
[0]*quant_coeff
[qscale
+18][0];
1392 if(((unsigned)(level
+dc_threshold1
))>dc_threshold2
){
1394 level
= (dc_bias
+ level
)>>(QUANT_SHIFT
-2);
1397 level
= (dc_bias
- level
)>>(QUANT_SHIFT
-2);
1400 // last_non_zero = i;
1405 const int dc_bias
= intra ?
(1<<(QUANT_SHIFT
+1))/3 : (1<<(QUANT_SHIFT
+1))/6;
1406 const unsigned int dc_threshold1
= (1<<(QUANT_SHIFT
+1)) - dc_bias
- 1;
1407 const unsigned int dc_threshold2
= (dc_threshold1
<<1);
1409 int level
= block
[0]*quant_table
[0];
1410 if(((unsigned)(level
+dc_threshold1
))>dc_threshold2
){
1412 level
= (dc_bias
+ level
)>>(QUANT_SHIFT
+1);
1415 level
= (dc_bias
- level
)>>(QUANT_SHIFT
+1);
1418 // last_non_zero = i;
1431 const int j
= scantable
[i
];
1432 int level
= block
[j
]*quant_table
[j
];
1434 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
1435 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
1436 if(((unsigned)(level
+threshold1
))>threshold2
){
1438 level
= (bias
+ level
)>>QUANT_SHIFT
;
1441 level
= (bias
- level
)>>QUANT_SHIFT
;
1450 return last_non_zero
;
1453 static void pred4x4_vertical_c(uint8_t *src
, uint8_t *topright
, int stride
){
1454 const uint32_t a
= ((uint32_t*)(src
-stride
))[0];
1455 ((uint32_t*)(src
+0*stride
))[0]= a
;
1456 ((uint32_t*)(src
+1*stride
))[0]= a
;
1457 ((uint32_t*)(src
+2*stride
))[0]= a
;
1458 ((uint32_t*)(src
+3*stride
))[0]= a
;
1461 static void pred4x4_horizontal_c(uint8_t *src
, uint8_t *topright
, int stride
){
1462 ((uint32_t*)(src
+0*stride
))[0]= src
[-1+0*stride
]*0x01010101;
1463 ((uint32_t*)(src
+1*stride
))[0]= src
[-1+1*stride
]*0x01010101;
1464 ((uint32_t*)(src
+2*stride
))[0]= src
[-1+2*stride
]*0x01010101;
1465 ((uint32_t*)(src
+3*stride
))[0]= src
[-1+3*stride
]*0x01010101;
1468 static void pred4x4_dc_c(uint8_t *src
, uint8_t *topright
, int stride
){
1469 const int dc
= ( src
[-stride
] + src
[1-stride
] + src
[2-stride
] + src
[3-stride
]
1470 + src
[-1+0*stride
] + src
[-1+1*stride
] + src
[-1+2*stride
] + src
[-1+3*stride
] + 4) >>3;
1472 ((uint32_t*)(src
+0*stride
))[0]=
1473 ((uint32_t*)(src
+1*stride
))[0]=
1474 ((uint32_t*)(src
+2*stride
))[0]=
1475 ((uint32_t*)(src
+3*stride
))[0]= dc
* 0x01010101;
1478 static void pred4x4_left_dc_c(uint8_t *src
, uint8_t *topright
, int stride
){
1479 const int dc
= ( src
[-1+0*stride
] + src
[-1+1*stride
] + src
[-1+2*stride
] + src
[-1+3*stride
] + 2) >>2;
1481 ((uint32_t*)(src
+0*stride
))[0]=
1482 ((uint32_t*)(src
+1*stride
))[0]=
1483 ((uint32_t*)(src
+2*stride
))[0]=
1484 ((uint32_t*)(src
+3*stride
))[0]= dc
* 0x01010101;
1487 static void pred4x4_top_dc_c(uint8_t *src
, uint8_t *topright
, int stride
){
1488 const int dc
= ( src
[-stride
] + src
[1-stride
] + src
[2-stride
] + src
[3-stride
] + 2) >>2;
1490 ((uint32_t*)(src
+0*stride
))[0]=
1491 ((uint32_t*)(src
+1*stride
))[0]=
1492 ((uint32_t*)(src
+2*stride
))[0]=
1493 ((uint32_t*)(src
+3*stride
))[0]= dc
* 0x01010101;
1496 static void pred4x4_128_dc_c(uint8_t *src
, uint8_t *topright
, int stride
){
1497 ((uint32_t*)(src
+0*stride
))[0]=
1498 ((uint32_t*)(src
+1*stride
))[0]=
1499 ((uint32_t*)(src
+2*stride
))[0]=
1500 ((uint32_t*)(src
+3*stride
))[0]= 128U*0x01010101U
;
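/* The LOAD_*_EDGE macros below pull the neighboring edge pixels (top-right,
 * left and top rows of the 4x4 block) into local variables t4..t7, l0..l3
 * and t0..t3; the directional 4x4 predictors that follow combine these
 * samples with the usual (a + 2*b + c + 2) >> 2 and (a + b + 1) >> 1
 * filters from the spec. */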
#define LOAD_TOP_RIGHT_EDGE\
    const int t4= topright[0];\
    const int t5= topright[1];\
    const int t6= topright[2];\
    const int t7= topright[3];

#define LOAD_LEFT_EDGE\
    const int l0= src[-1+0*stride];\
    const int l1= src[-1+1*stride];\
    const int l2= src[-1+2*stride];\
    const int l3= src[-1+3*stride];

#define LOAD_TOP_EDGE\
    const int t0= src[ 0-1*stride];\
    const int t1= src[ 1-1*stride];\
    const int t2= src[ 2-1*stride];\
    const int t3= src[ 3-1*stride];

1522 static void pred4x4_down_right_c(uint8_t *src, uint8_t *topright, int stride){
1523 const int lt
= src
[-1-1*stride
];
1527 src
[0+3*stride
]=(l3
+ 2*l2
+ l1
+ 2)>>2;
1529 src
[1+3*stride
]=(l2
+ 2*l1
+ l0
+ 2)>>2;
1532 src
[2+3*stride
]=(l1
+ 2*l0
+ lt
+ 2)>>2;
1536 src
[3+3*stride
]=(l0
+ 2*lt
+ t0
+ 2)>>2;
1539 src
[3+2*stride
]=(lt
+ 2*t0
+ t1
+ 2)>>2;
1541 src
[3+1*stride
]=(t0
+ 2*t1
+ t2
+ 2)>>2;
1542 src
[3+0*stride
]=(t1
+ 2*t2
+ t3
+ 2)>>2;
1545 static void pred4x4_down_left_c(uint8_t *src
, uint8_t *topright
, int stride
){
1550 src
[0+0*stride
]=(t0
+ t2
+ 2*t1
+ 2)>>2;
1552 src
[0+1*stride
]=(t1
+ t3
+ 2*t2
+ 2)>>2;
1555 src
[0+2*stride
]=(t2
+ t4
+ 2*t3
+ 2)>>2;
1559 src
[0+3*stride
]=(t3
+ t5
+ 2*t4
+ 2)>>2;
1562 src
[1+3*stride
]=(t4
+ t6
+ 2*t5
+ 2)>>2;
1564 src
[2+3*stride
]=(t5
+ t7
+ 2*t6
+ 2)>>2;
1565 src
[3+3*stride
]=(t6
+ 3*t7
+ 2)>>2;
1568 static void pred4x4_vertical_right_c(uint8_t *src
, uint8_t *topright
, int stride
){
1569 const int lt
= src
[-1-1*stride
];
1572 const __attribute__((unused
)) int unu
= l3
;
1575 src
[1+2*stride
]=(lt
+ t0
+ 1)>>1;
1577 src
[2+2*stride
]=(t0
+ t1
+ 1)>>1;
1579 src
[3+2*stride
]=(t1
+ t2
+ 1)>>1;
1580 src
[3+0*stride
]=(t2
+ t3
+ 1)>>1;
1582 src
[1+3*stride
]=(l0
+ 2*lt
+ t0
+ 2)>>2;
1584 src
[2+3*stride
]=(lt
+ 2*t0
+ t1
+ 2)>>2;
1586 src
[3+3*stride
]=(t0
+ 2*t1
+ t2
+ 2)>>2;
1587 src
[3+1*stride
]=(t1
+ 2*t2
+ t3
+ 2)>>2;
1588 src
[0+2*stride
]=(lt
+ 2*l0
+ l1
+ 2)>>2;
1589 src
[0+3*stride
]=(l0
+ 2*l1
+ l2
+ 2)>>2;
1592 static void pred4x4_vertical_left_c(uint8_t *src
, uint8_t *topright
, int stride
){
1595 const __attribute__((unused
)) int unu
= t7
;
1597 src
[0+0*stride
]=(t0
+ t1
+ 1)>>1;
1599 src
[0+2*stride
]=(t1
+ t2
+ 1)>>1;
1601 src
[1+2*stride
]=(t2
+ t3
+ 1)>>1;
1603 src
[2+2*stride
]=(t3
+ t4
+ 1)>>1;
1604 src
[3+2*stride
]=(t4
+ t5
+ 1)>>1;
1605 src
[0+1*stride
]=(t0
+ 2*t1
+ t2
+ 2)>>2;
1607 src
[0+3*stride
]=(t1
+ 2*t2
+ t3
+ 2)>>2;
1609 src
[1+3*stride
]=(t2
+ 2*t3
+ t4
+ 2)>>2;
1611 src
[2+3*stride
]=(t3
+ 2*t4
+ t5
+ 2)>>2;
1612 src
[3+3*stride
]=(t4
+ 2*t5
+ t6
+ 2)>>2;
1615 static void pred4x4_horizontal_up_c(uint8_t *src
, uint8_t *topright
, int stride
){
1618 src
[0+0*stride
]=(l0
+ l1
+ 1)>>1;
1619 src
[1+0*stride
]=(l0
+ 2*l1
+ l2
+ 2)>>2;
1621 src
[0+1*stride
]=(l1
+ l2
+ 1)>>1;
1623 src
[1+1*stride
]=(l1
+ 2*l2
+ l3
+ 2)>>2;
1625 src
[0+2*stride
]=(l2
+ l3
+ 1)>>1;
1627 src
[1+2*stride
]=(l2
+ 2*l3
+ l3
+ 2)>>2;
1636 static void pred4x4_horizontal_down_c(uint8_t *src
, uint8_t *topright
, int stride
){
1637 const int lt
= src
[-1-1*stride
];
1640 const __attribute__((unused
)) int unu
= t3
;
1643 src
[2+1*stride
]=(lt
+ l0
+ 1)>>1;
1645 src
[3+1*stride
]=(l0
+ 2*lt
+ t0
+ 2)>>2;
1646 src
[2+0*stride
]=(lt
+ 2*t0
+ t1
+ 2)>>2;
1647 src
[3+0*stride
]=(t0
+ 2*t1
+ t2
+ 2)>>2;
1649 src
[2+2*stride
]=(l0
+ l1
+ 1)>>1;
1651 src
[3+2*stride
]=(lt
+ 2*l0
+ l1
+ 2)>>2;
1653 src
[2+3*stride
]=(l1
+ l2
+ 1)>>1;
1655 src
[3+3*stride
]=(l0
+ 2*l1
+ l2
+ 2)>>2;
1656 src
[0+3*stride
]=(l2
+ l3
+ 1)>>1;
1657 src
[1+3*stride
]=(l1
+ 2*l2
+ l3
+ 2)>>2;
1660 static void pred16x16_vertical_c(uint8_t *src
, int stride
){
1662 const uint32_t a
= ((uint32_t*)(src
-stride
))[0];
1663 const uint32_t b
= ((uint32_t*)(src
-stride
))[1];
1664 const uint32_t c
= ((uint32_t*)(src
-stride
))[2];
1665 const uint32_t d
= ((uint32_t*)(src
-stride
))[3];
1667 for(i
=0; i
<16; i
++){
1668 ((uint32_t*)(src
+i
*stride
))[0]= a
;
1669 ((uint32_t*)(src
+i
*stride
))[1]= b
;
1670 ((uint32_t*)(src
+i
*stride
))[2]= c
;
1671 ((uint32_t*)(src
+i
*stride
))[3]= d
;
1675 static void pred16x16_horizontal_c(uint8_t *src
, int stride
){
1678 for(i
=0; i
<16; i
++){
1679 ((uint32_t*)(src
+i
*stride
))[0]=
1680 ((uint32_t*)(src
+i
*stride
))[1]=
1681 ((uint32_t*)(src
+i
*stride
))[2]=
1682 ((uint32_t*)(src
+i
*stride
))[3]= src
[-1+i
*stride
]*0x01010101;
1686 static void pred16x16_dc_c(uint8_t *src
, int stride
){
1690 dc
+= src
[-1+i
*stride
];
1697 dc
= 0x01010101*((dc
+ 16)>>5);
1699 for(i
=0; i
<16; i
++){
1700 ((uint32_t*)(src
+i
*stride
))[0]=
1701 ((uint32_t*)(src
+i
*stride
))[1]=
1702 ((uint32_t*)(src
+i
*stride
))[2]=
1703 ((uint32_t*)(src
+i
*stride
))[3]= dc
;
1707 static void pred16x16_left_dc_c(uint8_t *src
, int stride
){
1711 dc
+= src
[-1+i
*stride
];
1714 dc
= 0x01010101*((dc
+ 8)>>4);
1716 for(i
=0; i
<16; i
++){
1717 ((uint32_t*)(src
+i
*stride
))[0]=
1718 ((uint32_t*)(src
+i
*stride
))[1]=
1719 ((uint32_t*)(src
+i
*stride
))[2]=
1720 ((uint32_t*)(src
+i
*stride
))[3]= dc
;
1724 static void pred16x16_top_dc_c(uint8_t *src
, int stride
){
1730 dc
= 0x01010101*((dc
+ 8)>>4);
1732 for(i
=0; i
<16; i
++){
1733 ((uint32_t*)(src
+i
*stride
))[0]=
1734 ((uint32_t*)(src
+i
*stride
))[1]=
1735 ((uint32_t*)(src
+i
*stride
))[2]=
1736 ((uint32_t*)(src
+i
*stride
))[3]= dc
;
1740 static void pred16x16_128_dc_c(uint8_t *src
, int stride
){
1743 for(i
=0; i
<16; i
++){
1744 ((uint32_t*)(src
+i
*stride
))[0]=
1745 ((uint32_t*)(src
+i
*stride
))[1]=
1746 ((uint32_t*)(src
+i
*stride
))[2]=
1747 ((uint32_t*)(src
+i
*stride
))[3]= 0x01010101U
*128U;
1751 static inline void pred16x16_plane_compat_c(uint8_t *src
, int stride
, const int svq3
){
1754 uint8_t *cm
= cropTbl
+ MAX_NEG_CROP
;
1755 const uint8_t * const src0
= src
+7-stride
;
1756 const uint8_t *src1
= src
+8*stride
-1;
1757 const uint8_t *src2
= src1
-2*stride
; // == src+6*stride-1;
1758 int H
= src0
[1] - src0
[-1];
1759 int V
= src1
[0] - src2
[ 0];
1760 for(k
=2; k
<=8; ++k
) {
1761 src1
+= stride
; src2
-= stride
;
1762 H
+= k
*(src0
[k
] - src0
[-k
]);
1763 V
+= k
*(src1
[0] - src2
[ 0]);
1766 H
= ( 5*(H
/4) ) / 16;
1767 V
= ( 5*(V
/4) ) / 16;
1769 /* required for 100% accuracy */
1770 i
= H
; H
= V
; V
= i
;
1772 H
= ( 5*H
+32 ) >> 6;
1773 V
= ( 5*V
+32 ) >> 6;
1776 a
= 16*(src1
[0] + src2
[16] + 1) - 7*(V
+H
);
1777 for(j
=16; j
>0; --j
) {
1780 for(i
=-16; i
<0; i
+=4) {
1781 src
[16+i
] = cm
[ (b
) >> 5 ];
1782 src
[17+i
] = cm
[ (b
+ H
) >> 5 ];
1783 src
[18+i
] = cm
[ (b
+2*H
) >> 5 ];
1784 src
[19+i
] = cm
[ (b
+3*H
) >> 5 ];
1791 static void pred16x16_plane_c(uint8_t *src
, int stride
){
1792 pred16x16_plane_compat_c(src
, stride
, 0);
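/* Plane prediction fits a linear gradient a + b*x + c*y through the top and
 * left border samples (H and V above are the weighted sums of horizontal and
 * vertical sample differences). pred16x16_plane_compat_c() is shared with the
 * SVQ3 decoder, which passes svq3=1 to swap H/V and apply its slightly
 * different scaling. */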
1795 static void pred8x8_vertical_c(uint8_t *src
, int stride
){
1797 const uint32_t a
= ((uint32_t*)(src
-stride
))[0];
1798 const uint32_t b
= ((uint32_t*)(src
-stride
))[1];
1801 ((uint32_t*)(src
+i
*stride
))[0]= a
;
1802 ((uint32_t*)(src
+i
*stride
))[1]= b
;
1806 static void pred8x8_horizontal_c(uint8_t *src
, int stride
){
1810 ((uint32_t*)(src
+i
*stride
))[0]=
1811 ((uint32_t*)(src
+i
*stride
))[1]= src
[-1+i
*stride
]*0x01010101;
1815 static void pred8x8_128_dc_c(uint8_t *src
, int stride
){
1819 ((uint32_t*)(src
+i
*stride
))[0]=
1820 ((uint32_t*)(src
+i
*stride
))[1]= 0x01010101U
*128U;
1823 ((uint32_t*)(src
+i
*stride
))[0]=
1824 ((uint32_t*)(src
+i
*stride
))[1]= 0x01010101U
*128U;
1828 static void pred8x8_left_dc_c(uint8_t *src
, int stride
){
1834 dc0
+= src
[-1+i
*stride
];
1835 dc2
+= src
[-1+(i
+4)*stride
];
1837 dc0
= 0x01010101*((dc0
+ 2)>>2);
1838 dc2
= 0x01010101*((dc2
+ 2)>>2);
1841 ((uint32_t*)(src
+i
*stride
))[0]=
1842 ((uint32_t*)(src
+i
*stride
))[1]= dc0
;
1845 ((uint32_t*)(src
+i
*stride
))[0]=
1846 ((uint32_t*)(src
+i
*stride
))[1]= dc2
;
1850 static void pred8x8_top_dc_c(uint8_t *src
, int stride
){
1856 dc0
+= src
[i
-stride
];
1857 dc1
+= src
[4+i
-stride
];
1859 dc0
= 0x01010101*((dc0
+ 2)>>2);
1860 dc1
= 0x01010101*((dc1
+ 2)>>2);
1863 ((uint32_t*)(src
+i
*stride
))[0]= dc0
;
1864 ((uint32_t*)(src
+i
*stride
))[1]= dc1
;
1867 ((uint32_t*)(src
+i
*stride
))[0]= dc0
;
1868 ((uint32_t*)(src
+i
*stride
))[1]= dc1
;
1873 static void pred8x8_dc_c(uint8_t *src
, int stride
){
1875 int dc0
, dc1
, dc2
, dc3
;
1879 dc0
+= src
[-1+i
*stride
] + src
[i
-stride
];
1880 dc1
+= src
[4+i
-stride
];
1881 dc2
+= src
[-1+(i
+4)*stride
];
1883 dc3
= 0x01010101*((dc1
+ dc2
+ 4)>>3);
1884 dc0
= 0x01010101*((dc0
+ 4)>>3);
1885 dc1
= 0x01010101*((dc1
+ 2)>>2);
1886 dc2
= 0x01010101*((dc2
+ 2)>>2);
1889 ((uint32_t*)(src
+i
*stride
))[0]= dc0
;
1890 ((uint32_t*)(src
+i
*stride
))[1]= dc1
;
1893 ((uint32_t*)(src
+i
*stride
))[0]= dc2
;
1894 ((uint32_t*)(src
+i
*stride
))[1]= dc3
;
1898 static void pred8x8_plane_c(uint8_t *src
, int stride
){
1901 uint8_t *cm
= cropTbl
+ MAX_NEG_CROP
;
1902 const uint8_t * const src0
= src
+3-stride
;
1903 const uint8_t *src1
= src
+4*stride
-1;
1904 const uint8_t *src2
= src1
-2*stride
; // == src+2*stride-1;
1905 int H
= src0
[1] - src0
[-1];
1906 int V
= src1
[0] - src2
[ 0];
1907 for(k
=2; k
<=4; ++k
) {
1908 src1
+= stride
; src2
-= stride
;
1909 H
+= k
*(src0
[k
] - src0
[-k
]);
1910 V
+= k
*(src1
[0] - src2
[ 0]);
1912 H
= ( 17*H
+16 ) >> 5;
1913 V
= ( 17*V
+16 ) >> 5;
1915 a
= 16*(src1
[0] + src2
[8]+1) - 3*(V
+H
);
1916 for(j
=8; j
>0; --j
) {
1919 src
[0] = cm
[ (b
) >> 5 ];
1920 src
[1] = cm
[ (b
+ H
) >> 5 ];
1921 src
[2] = cm
[ (b
+2*H
) >> 5 ];
1922 src
[3] = cm
[ (b
+3*H
) >> 5 ];
1923 src
[4] = cm
[ (b
+4*H
) >> 5 ];
1924 src
[5] = cm
[ (b
+5*H
) >> 5 ];
1925 src
[6] = cm
[ (b
+6*H
) >> 5 ];
1926 src
[7] = cm
[ (b
+7*H
) >> 5 ];
static inline void mc_dir_part(H264Context *h, Picture *pic, int n, int square, int chroma_height, int delta, int list,
                           uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                           int src_x_offset, int src_y_offset,
                           qpel_mc_func *qpix_op, h264_chroma_mc_func chroma_op){
    MpegEncContext * const s = &h->s;
    const int mx= h->mv_cache[list][ scan8[n] ][0] + src_x_offset*8;
    const int my= h->mv_cache[list][ scan8[n] ][1] + src_y_offset*8;
    const int luma_xy= (mx&3) + ((my&3)<<2);
    uint8_t * src_y = pic->data[0] + (mx>>2) + (my>>2)*s->linesize;
    uint8_t * src_cb= pic->data[1] + (mx>>3) + (my>>3)*s->uvlinesize;
    uint8_t * src_cr= pic->data[2] + (mx>>3) + (my>>3)*s->uvlinesize;
    int extra_width= (s->flags&CODEC_FLAG_EMU_EDGE) ? 0 : 16; //FIXME increase edge?, IMHO not worth it
    int extra_height= extra_width;
    int emu=0;
    const int full_mx= mx>>2;
    const int full_my= my>>2;

    assert(pic->data[0]);

    if(mx&7) extra_width -= 3;
    if(my&7) extra_height-= 3;

    if(   full_mx < 0-extra_width
       || full_my < 0-extra_height
       || full_mx + 16/*FIXME*/ > s->width + extra_width
       || full_my + 16/*FIXME*/ > s->height + extra_height){
        ff_emulated_edge_mc(s->edge_emu_buffer, src_y - 2 - 2*s->linesize, s->linesize, 16+5, 16+5/*FIXME*/, full_mx-2, full_my-2, s->width, s->height);
        src_y= s->edge_emu_buffer + 2 + 2*s->linesize;
        emu=1;
    }

    qpix_op[luma_xy](dest_y, src_y, s->linesize); //FIXME try variable height perhaps?

    if(!square){
        qpix_op[luma_xy](dest_y + delta, src_y + delta, s->linesize);
    }

    if(s->flags&CODEC_FLAG_GRAY) return;

    if(emu){
        ff_emulated_edge_mc(s->edge_emu_buffer, src_cb, s->uvlinesize, 9, 9/*FIXME*/, (mx>>3), (my>>3), s->width>>1, s->height>>1);
        src_cb= s->edge_emu_buffer;
    }
    chroma_op(dest_cb, src_cb, s->uvlinesize, chroma_height, mx&7, my&7);

    if(emu){
        ff_emulated_edge_mc(s->edge_emu_buffer, src_cr, s->uvlinesize, 9, 9/*FIXME*/, (mx>>3), (my>>3), s->width>>1, s->height>>1);
        src_cr= s->edge_emu_buffer;
    }
    chroma_op(dest_cr, src_cr, s->uvlinesize, chroma_height, mx&7, my&7);
}

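/**
 * Motion compensation for one partition: runs list0 and/or list1
 * prediction, switching from the put to the avg functions for the second
 * direction so that bipredicted blocks end up averaged.
 */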
static inline void mc_part(H264Context *h, int n, int square, int chroma_height, int delta,
                           uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                           int x_offset, int y_offset,
                           qpel_mc_func *qpix_put, h264_chroma_mc_func chroma_put,
                           qpel_mc_func *qpix_avg, h264_chroma_mc_func chroma_avg,
                           int list0, int list1){
    MpegEncContext * const s = &h->s;
    qpel_mc_func *qpix_op=  qpix_put;
    h264_chroma_mc_func chroma_op= chroma_put;

    dest_y  += 2*x_offset + 2*y_offset*s->linesize;
    dest_cb +=   x_offset +   y_offset*s->uvlinesize;
    dest_cr +=   x_offset +   y_offset*s->uvlinesize;
    x_offset += 8*s->mb_x;
    y_offset += 8*s->mb_y;

    if(list0){
        Picture *ref= &h->ref_list[0][ h->ref_cache[0][ scan8[n] ] ];
        mc_dir_part(h, ref, n, square, chroma_height, delta, 0,
                    dest_y, dest_cb, dest_cr, x_offset, y_offset,
                    qpix_op, chroma_op);

        qpix_op=   qpix_avg;
        chroma_op= chroma_avg;
    }

    if(list1){
        Picture *ref= &h->ref_list[1][ h->ref_cache[1][ scan8[n] ] ];
        mc_dir_part(h, ref, n, square, chroma_height, delta, 1,
                    dest_y, dest_cb, dest_cr, x_offset, y_offset,
                    qpix_op, chroma_op);
    }
}

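/**
 * Performs motion compensation for a whole inter macroblock, dispatching
 * on its partitioning (16x16, 16x8, 8x16 or 8x8 with sub-partitions).
 */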
static void hl_motion(H264Context *h, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                      qpel_mc_func (*qpix_put)[16], h264_chroma_mc_func (*chroma_put),
                      qpel_mc_func (*qpix_avg)[16], h264_chroma_mc_func (*chroma_avg)){
    MpegEncContext * const s = &h->s;
    const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
    const int mb_type= s->current_picture.mb_type[mb_xy];

    assert(IS_INTER(mb_type));

    if(IS_16X16(mb_type)){
        mc_part(h, 0, 1, 8, 0, dest_y, dest_cb, dest_cr, 0, 0,
                qpix_put[0], chroma_put[0], qpix_avg[0], chroma_avg[0],
                IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1));
    }else if(IS_16X8(mb_type)){
        mc_part(h, 0, 0, 4, 8, dest_y, dest_cb, dest_cr, 0, 0,
                qpix_put[1], chroma_put[0], qpix_avg[1], chroma_avg[0],
                IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1));
        mc_part(h, 8, 0, 4, 8, dest_y, dest_cb, dest_cr, 0, 4,
                qpix_put[1], chroma_put[0], qpix_avg[1], chroma_avg[0],
                IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1));
    }else if(IS_8X16(mb_type)){
        mc_part(h, 0, 0, 8, 8*s->linesize, dest_y, dest_cb, dest_cr, 0, 0,
                qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
                IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1));
        mc_part(h, 4, 0, 8, 8*s->linesize, dest_y, dest_cb, dest_cr, 4, 0,
                qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
                IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1));
    }else{
        int i;

        assert(IS_8X8(mb_type));

        for(i=0; i<4; i++){
            const int sub_mb_type= h->sub_mb_type[i];
            const int n= 4*i;
            int x_offset= (i&1)<<2;
            int y_offset= (i&2)<<1;

            if(IS_SUB_8X8(sub_mb_type)){
                mc_part(h, n, 1, 4, 0, dest_y, dest_cb, dest_cr, x_offset, y_offset,
                        qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
                        IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
            }else if(IS_SUB_8X4(sub_mb_type)){
                mc_part(h, n  , 0, 2, 4, dest_y, dest_cb, dest_cr, x_offset, y_offset,
                        qpix_put[2], chroma_put[1], qpix_avg[2], chroma_avg[1],
                        IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
                mc_part(h, n+2, 0, 2, 4, dest_y, dest_cb, dest_cr, x_offset, y_offset+2,
                        qpix_put[2], chroma_put[1], qpix_avg[2], chroma_avg[1],
                        IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
            }else if(IS_SUB_4X8(sub_mb_type)){
                mc_part(h, n  , 0, 4, 4*s->linesize, dest_y, dest_cb, dest_cr, x_offset, y_offset,
                        qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
                        IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
                mc_part(h, n+1, 0, 4, 4*s->linesize, dest_y, dest_cb, dest_cr, x_offset+2, y_offset,
                        qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
                        IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
            }else{
                int j;
                assert(IS_SUB_4X4(sub_mb_type));
                for(j=0; j<4; j++){
                    int sub_x_offset= x_offset + 2*(j&1);
                    int sub_y_offset= y_offset +   (j&2);
                    mc_part(h, n+j, 1, 2, 0, dest_y, dest_cb, dest_cr, sub_x_offset, sub_y_offset,
                            qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
                            IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
                }
            }
        }
    }
}

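/**
 * Initializes the static CAVLC tables (coeff_token, total_zeros and run
 * VLCs); only executed once.
 */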
static void decode_init_vlc(H264Context *h){
    static int done = 0;

    if (!done) {
        int i;
        done = 1;

        init_vlc(&chroma_dc_coeff_token_vlc, CHROMA_DC_COEFF_TOKEN_VLC_BITS, 4*5,
                 &chroma_dc_coeff_token_len [0], 1, 1,
                 &chroma_dc_coeff_token_bits[0], 1, 1, 1);

        for(i=0; i<4; i++){
            init_vlc(&coeff_token_vlc[i], COEFF_TOKEN_VLC_BITS, 4*17,
                     &coeff_token_len [i][0], 1, 1,
                     &coeff_token_bits[i][0], 1, 1, 1);
        }

        for(i=0; i<3; i++){
            init_vlc(&chroma_dc_total_zeros_vlc[i], CHROMA_DC_TOTAL_ZEROS_VLC_BITS, 4,
                     &chroma_dc_total_zeros_len [i][0], 1, 1,
                     &chroma_dc_total_zeros_bits[i][0], 1, 1, 1);
        }
        for(i=0; i<15; i++){
            init_vlc(&total_zeros_vlc[i], TOTAL_ZEROS_VLC_BITS, 16,
                     &total_zeros_len [i][0], 1, 1,
                     &total_zeros_bits[i][0], 1, 1, 1);
        }

        for(i=0; i<6; i++){
            init_vlc(&run_vlc[i], RUN_VLC_BITS, 7,
                     &run_len [i][0], 1, 1,
                     &run_bits[i][0], 1, 1, 1);
        }
        init_vlc(&run7_vlc, RUN7_VLC_BITS, 16,
                 &run_len [6][0], 1, 1,
                 &run_bits[6][0], 1, 1, 1);
    }
}

/**
 * Sets the intra prediction function pointers.
 */
static void init_pred_ptrs(H264Context *h){
//    MpegEncContext * const s = &h->s;

    h->pred4x4[VERT_PRED           ]= pred4x4_vertical_c;
    h->pred4x4[HOR_PRED            ]= pred4x4_horizontal_c;
    h->pred4x4[DC_PRED             ]= pred4x4_dc_c;
    h->pred4x4[DIAG_DOWN_LEFT_PRED ]= pred4x4_down_left_c;
    h->pred4x4[DIAG_DOWN_RIGHT_PRED]= pred4x4_down_right_c;
    h->pred4x4[VERT_RIGHT_PRED     ]= pred4x4_vertical_right_c;
    h->pred4x4[HOR_DOWN_PRED       ]= pred4x4_horizontal_down_c;
    h->pred4x4[VERT_LEFT_PRED      ]= pred4x4_vertical_left_c;
    h->pred4x4[HOR_UP_PRED         ]= pred4x4_horizontal_up_c;
    h->pred4x4[LEFT_DC_PRED        ]= pred4x4_left_dc_c;
    h->pred4x4[TOP_DC_PRED         ]= pred4x4_top_dc_c;
    h->pred4x4[DC_128_PRED         ]= pred4x4_128_dc_c;

    h->pred8x8[DC_PRED8x8     ]= pred8x8_dc_c;
    h->pred8x8[VERT_PRED8x8   ]= pred8x8_vertical_c;
    h->pred8x8[HOR_PRED8x8    ]= pred8x8_horizontal_c;
    h->pred8x8[PLANE_PRED8x8  ]= pred8x8_plane_c;
    h->pred8x8[LEFT_DC_PRED8x8]= pred8x8_left_dc_c;
    h->pred8x8[TOP_DC_PRED8x8 ]= pred8x8_top_dc_c;
    h->pred8x8[DC_128_PRED8x8 ]= pred8x8_128_dc_c;

    h->pred16x16[DC_PRED8x8     ]= pred16x16_dc_c;
    h->pred16x16[VERT_PRED8x8   ]= pred16x16_vertical_c;
    h->pred16x16[HOR_PRED8x8    ]= pred16x16_horizontal_c;
    h->pred16x16[PLANE_PRED8x8  ]= pred16x16_plane_c;
    h->pred16x16[LEFT_DC_PRED8x8]= pred16x16_left_dc_c;
    h->pred16x16[TOP_DC_PRED8x8 ]= pred16x16_top_dc_c;
    h->pred16x16[DC_128_PRED8x8 ]= pred16x16_128_dc_c;
}

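/**
 * Frees the tables allocated by alloc_tables().
 */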
static void free_tables(H264Context *h){
    av_freep(&h->intra4x4_pred_mode);
    av_freep(&h->chroma_pred_mode_table);
    av_freep(&h->cbp_table);
    av_freep(&h->mvd_table[0]);
    av_freep(&h->mvd_table[1]);
    av_freep(&h->non_zero_count);
    av_freep(&h->slice_table_base);
    av_freep(&h->top_border);
    h->slice_table= NULL;

    av_freep(&h->mb2b_xy);
    av_freep(&h->mb2b8_xy);
}

/**
 * allocates tables.
 * needs width/height
 */
static int alloc_tables(H264Context *h){
    MpegEncContext * const s = &h->s;
    const int big_mb_num= s->mb_stride * (s->mb_height+1);
    int x, y;

    CHECKED_ALLOCZ(h->intra4x4_pred_mode, big_mb_num * 8  * sizeof(uint8_t))

    CHECKED_ALLOCZ(h->non_zero_count    , big_mb_num * 16 * sizeof(uint8_t))
    CHECKED_ALLOCZ(h->slice_table_base  , big_mb_num * sizeof(uint8_t))
    CHECKED_ALLOCZ(h->top_border        , s->mb_width * (16+8+8) * sizeof(uint8_t))
    CHECKED_ALLOCZ(h->cbp_table         , big_mb_num * sizeof(uint16_t))

    if( h->pps.cabac ) {
        CHECKED_ALLOCZ(h->chroma_pred_mode_table, big_mb_num * sizeof(uint8_t))
        CHECKED_ALLOCZ(h->mvd_table[0], 32*big_mb_num * sizeof(uint16_t));
        CHECKED_ALLOCZ(h->mvd_table[1], 32*big_mb_num * sizeof(uint16_t));
    }

    memset(h->slice_table_base, -1, big_mb_num * sizeof(uint8_t));
    h->slice_table= h->slice_table_base + s->mb_stride + 1;

    CHECKED_ALLOCZ(h->mb2b_xy , big_mb_num * sizeof(uint16_t));
    CHECKED_ALLOCZ(h->mb2b8_xy, big_mb_num * sizeof(uint16_t));
    for(y=0; y<s->mb_height; y++){
        for(x=0; x<s->mb_width; x++){
            const int mb_xy= x + y*s->mb_stride;
            const int b_xy = 4*x + 4*y*h->b_stride;
            const int b8_xy= 2*x + 2*y*h->b8_stride;

            h->mb2b_xy [mb_xy]= b_xy;
            h->mb2b8_xy[mb_xy]= b8_xy;
        }
    }

    return 0;
fail:
    free_tables(h);
    return -1;
}

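/**
 * Common decoder setup: copies width, height and codec id from the
 * AVCodecContext into the MpegEncContext and sets the prediction pointers.
 */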
static void common_init(H264Context *h){
    MpegEncContext * const s = &h->s;

    s->width = s->avctx->width;
    s->height = s->avctx->height;
    s->codec_id= s->avctx->codec->id;

    init_pred_ptrs(h);

    s->unrestricted_mv=1;
    s->decode=1; //FIXME
}

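/**
 * Decoder initialization callback.
 */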
static int decode_init(AVCodecContext *avctx){
    H264Context *h= avctx->priv_data;
    MpegEncContext * const s = &h->s;

    MPV_decode_defaults(s);

    s->avctx = avctx;
    common_init(h);

    s->out_format = FMT_H264;
    s->workaround_bugs= avctx->workaround_bugs;

//    s->decode_mb= ff_h263_decode_mb;

    avctx->pix_fmt= PIX_FMT_YUV420P;

    decode_init_vlc(h);

    if(avctx->codec_tag != 0x31637661 && avctx->codec_tag != 0x31435641) // avc1
        h->is_avc = 0;
    else {
        if((avctx->extradata_size == 0) || (avctx->extradata == NULL)) {
            av_log(avctx, AV_LOG_ERROR, "AVC codec requires avcC data\n");
            return -1;
        }
        h->is_avc = 1;
        h->got_avcC = 0;
    }

    return 0;
}

static void frame_start(H264Context *h){
    MpegEncContext * const s = &h->s;
    int i;

    MPV_frame_start(s, s->avctx);
    ff_er_frame_start(s);

    assert(s->linesize && s->uvlinesize);

    for(i=0; i<16; i++){
        h->block_offset[i]= 4*((scan8[i] - scan8[0])&7) + 4*s->linesize*((scan8[i] - scan8[0])>>3);
        h->chroma_subblock_offset[i]= 2*((scan8[i] - scan8[0])&7) + 2*s->uvlinesize*((scan8[i] - scan8[0])>>3);
    }
    for(i=0; i<4; i++){
        h->block_offset[16+i]=
        h->block_offset[20+i]= 4*((scan8[i] - scan8[0])&7) + 4*s->uvlinesize*((scan8[i] - scan8[0])>>3);
    }

//    s->decode= (s->flags&CODEC_FLAG_PSNR) || !s->encoding || s->current_picture.reference /*|| h->contains_intra*/ || 1;
}

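/**
 * Backs up the unfiltered right column and bottom row of the macroblock
 * (into left_border/top_border) before the deblocking filter overwrites them.
 */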
static inline void backup_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize){
    MpegEncContext * const s = &h->s;
    int i;

    src_y  -=   linesize;
    src_cb -= uvlinesize;
    src_cr -= uvlinesize;

    h->left_border[0]= h->top_border[s->mb_x][15];
    for(i=1; i<17; i++){
        h->left_border[i]= src_y[15+i*  linesize];
    }

    *(uint64_t*)(h->top_border[s->mb_x]+0)= *(uint64_t*)(src_y +  16*linesize);
    *(uint64_t*)(h->top_border[s->mb_x]+8)= *(uint64_t*)(src_y +8+16*linesize);

    if(!(s->flags&CODEC_FLAG_GRAY)){
        h->left_border[17  ]= h->top_border[s->mb_x][16+7];
        h->left_border[17+9]= h->top_border[s->mb_x][24+7];
        for(i=1; i<9; i++){
            h->left_border[i+17  ]= src_cb[7+i*uvlinesize];
            h->left_border[i+17+9]= src_cr[7+i*uvlinesize];
        }
        *(uint64_t*)(h->top_border[s->mb_x]+16)= *(uint64_t*)(src_cb+8*uvlinesize);
        *(uint64_t*)(h->top_border[s->mb_x]+24)= *(uint64_t*)(src_cr+8*uvlinesize);
    }
}

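/**
 * Exchanges the current macroblock's edge samples with the saved unfiltered
 * borders, so intra prediction uses unfiltered neighbours; called with
 * xchg=1 before and xchg=0 after prediction when deblocking is enabled.
 */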
static inline void xchg_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int xchg){
    MpegEncContext * const s = &h->s;
    int temp8, i;
    uint64_t temp64;
    int deblock_left = (s->mb_x > 0);
    int deblock_top  = (s->mb_y > 0);

    src_y  -=   linesize + 1;
    src_cb -= uvlinesize + 1;
    src_cr -= uvlinesize + 1;

#define XCHG(a,b,t,xchg)\
t= a;\
if(xchg)\
    a= b;\
b= t;

    if(deblock_left){
        for(i = !deblock_top; i<17; i++){
            XCHG(h->left_border[i], src_y[i*  linesize], temp8, xchg);
        }
    }

    if(deblock_top){
        XCHG(*(uint64_t*)(h->top_border[s->mb_x]+0), *(uint64_t*)(src_y +1), temp64, xchg);
        XCHG(*(uint64_t*)(h->top_border[s->mb_x]+8), *(uint64_t*)(src_y +9), temp64, 1);
    }

    if(!(s->flags&CODEC_FLAG_GRAY)){
        if(deblock_left){
            for(i = !deblock_top; i<9; i++){
                XCHG(h->left_border[i+17  ], src_cb[i*uvlinesize], temp8, xchg);
                XCHG(h->left_border[i+17+9], src_cr[i*uvlinesize], temp8, xchg);
            }
        }
        if(deblock_top){
            XCHG(*(uint64_t*)(h->top_border[s->mb_x]+16), *(uint64_t*)(src_cb+1), temp64, 1);
            XCHG(*(uint64_t*)(h->top_border[s->mb_x]+24), *(uint64_t*)(src_cr+1), temp64, 1);
        }
    }
}

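/**
 * Reconstructs one macroblock: intra prediction or motion compensation,
 * the inverse transforms, and (if enabled) border backup and deblocking.
 */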
static void hl_decode_mb(H264Context *h){
    MpegEncContext * const s = &h->s;
    const int mb_x= s->mb_x;
    const int mb_y= s->mb_y;
    const int mb_xy= mb_x + mb_y*s->mb_stride;
    const int mb_type= s->current_picture.mb_type[mb_xy];
    uint8_t  *dest_y, *dest_cb, *dest_cr;
    int linesize, uvlinesize /*dct_offset*/;
    int i;

    dest_y  = s->current_picture.data[0] + (mb_y * 16* s->linesize  ) + mb_x * 16;
    dest_cb = s->current_picture.data[1] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
    dest_cr = s->current_picture.data[2] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;

    if (h->mb_field_decoding_flag) {
        linesize   = s->linesize * 2;
        uvlinesize = s->uvlinesize * 2;
        if(mb_y&1){ //FIXME move out of this func?
            dest_y -= s->linesize*15;
            dest_cb-= s->linesize*7;
            dest_cr-= s->linesize*7;
        }
    } else {
        linesize   = s->linesize;
        uvlinesize = s->uvlinesize;
//        dct_offset = s->linesize * 16;
    }

    if(IS_INTRA(mb_type)){
        if(h->deblocking_filter)
            xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 1);

        if(!(s->flags&CODEC_FLAG_GRAY)){
            h->pred8x8[ h->chroma_pred_mode ](dest_cb, uvlinesize);
            h->pred8x8[ h->chroma_pred_mode ](dest_cr, uvlinesize);
        }

        if(IS_INTRA4x4(mb_type)){
            for(i=0; i<16; i++){
                uint8_t * const ptr= dest_y + h->block_offset[i];
                uint8_t *topright;
                const int dir= h->intra4x4_pred_mode_cache[ scan8[i] ];
                int tr;

                if(dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED){
                    const int topright_avail= (h->topright_samples_available<<i)&0x8000;
                    assert(mb_y || linesize <= h->block_offset[i]);
                    if(!topright_avail){
                        tr= ptr[3 - linesize]*0x01010101;
                        topright= (uint8_t*) &tr;
                    }else if(i==5 && h->deblocking_filter){
                        tr= *(uint32_t*)h->top_border[mb_x+1];
                        topright= (uint8_t*) &tr;
                    }else
                        topright= ptr + 4 - linesize;
                }else
                    topright= NULL;

                h->pred4x4[ dir ](ptr, topright, linesize);
                if(h->non_zero_count_cache[ scan8[i] ]){
                    if(s->codec_id == CODEC_ID_H264)
                        s->dsp.h264_idct_add(ptr, h->mb + i*16, linesize);
                    else
                        svq3_add_idct_c(ptr, h->mb + i*16, linesize, s->qscale, 0);
                }
            }
        }else{
            h->pred16x16[ h->intra16x16_pred_mode ](dest_y, linesize);
            if(s->codec_id == CODEC_ID_H264)
                h264_luma_dc_dequant_idct_c(h->mb, s->qscale);
            else
                svq3_luma_dc_dequant_idct_c(h->mb, s->qscale);
        }
        if(h->deblocking_filter)
            xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 0);
    }else if(s->codec_id == CODEC_ID_H264){
        hl_motion(h, dest_y, dest_cb, dest_cr,
                  s->dsp.put_h264_qpel_pixels_tab, s->dsp.put_h264_chroma_pixels_tab,
                  s->dsp.avg_h264_qpel_pixels_tab, s->dsp.avg_h264_chroma_pixels_tab);
    }

    if(!IS_INTRA4x4(mb_type)){
        if(s->codec_id == CODEC_ID_H264){
            for(i=0; i<16; i++){
                if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){ //FIXME benchmark weird rule, & below
                    uint8_t * const ptr= dest_y + h->block_offset[i];
                    s->dsp.h264_idct_add(ptr, h->mb + i*16, linesize);
                }
            }
        }else{
            for(i=0; i<16; i++){
                if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){ //FIXME benchmark weird rule, & below
                    uint8_t * const ptr= dest_y + h->block_offset[i];
                    svq3_add_idct_c(ptr, h->mb + i*16, linesize, s->qscale, IS_INTRA(mb_type) ? 1 : 0);
                }
            }
        }
    }

    if(!(s->flags&CODEC_FLAG_GRAY)){
        chroma_dc_dequant_idct_c(h->mb + 16*16, h->chroma_qp);
        chroma_dc_dequant_idct_c(h->mb + 16*16+4*16, h->chroma_qp);
        if(s->codec_id == CODEC_ID_H264){
            for(i=16; i<16+4; i++){
                if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){
                    uint8_t * const ptr= dest_cb + h->block_offset[i];
                    s->dsp.h264_idct_add(ptr, h->mb + i*16, uvlinesize);
                }
            }
            for(i=20; i<20+4; i++){
                if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){
                    uint8_t * const ptr= dest_cr + h->block_offset[i];
                    s->dsp.h264_idct_add(ptr, h->mb + i*16, uvlinesize);
                }
            }
        }else{
            for(i=16; i<16+4; i++){
                if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){
                    uint8_t * const ptr= dest_cb + h->block_offset[i];
                    svq3_add_idct_c(ptr, h->mb + i*16, uvlinesize, chroma_qp[s->qscale + 12] - 12, 2);
                }
            }
            for(i=20; i<20+4; i++){
                if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){
                    uint8_t * const ptr= dest_cr + h->block_offset[i];
                    svq3_add_idct_c(ptr, h->mb + i*16, uvlinesize, chroma_qp[s->qscale + 12] - 12, 2);
                }
            }
        }
    }
    if(h->deblocking_filter) {
        backup_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize);
        filter_mb(h, mb_x, mb_y, dest_y, dest_cb, dest_cr);
    }
}

/**
 * fills the default_ref_list.
 */
static int fill_default_ref_list(H264Context *h){
    MpegEncContext * const s = &h->s;
    int i;
    Picture sorted_short_ref[16];

    if(h->slice_type==B_TYPE){
        int out_i;
        int limit= -1;

        for(out_i=0; out_i<h->short_ref_count; out_i++){
            int best_i=-1;
            int best_poc=INT_MAX;

            for(i=0; i<h->short_ref_count; i++){
                const int poc= h->short_ref[i]->poc;
                if(poc > limit && poc < best_poc){
                    best_poc= poc;
                    best_i= i;
                }
            }

            assert(best_i != -1);

            limit= best_poc;
            sorted_short_ref[out_i]= *h->short_ref[best_i];
        }
    }

    if(s->picture_structure == PICT_FRAME){
        if(h->slice_type==B_TYPE){
            const int current_poc= s->current_picture_ptr->poc;
            int list;

            for(list=0; list<2; list++){
                int index=0;

                for(i=0; i<h->short_ref_count && index < h->ref_count[list]; i++){
                    const int i2= list ? h->short_ref_count - i - 1 : i;
                    const int poc= sorted_short_ref[i2].poc;

                    if(sorted_short_ref[i2].reference != 3) continue; //FIXME reference field shit

                    if((list==1 && poc > current_poc) || (list==0 && poc < current_poc)){
                        h->default_ref_list[list][index  ]= sorted_short_ref[i2];
                        h->default_ref_list[list][index++].pic_id= sorted_short_ref[i2].frame_num;
                    }
                }

                for(i=0; i<h->long_ref_count && index < h->ref_count[ list ]; i++){
                    if(h->long_ref[i]->reference != 3) continue;

                    h->default_ref_list[ list ][index  ]= *h->long_ref[i];
                    h->default_ref_list[ list ][index++].pic_id= i;
                }

                if(h->long_ref_count > 1 && h->short_ref_count==0){
                    Picture temp= h->default_ref_list[1][0];
                    h->default_ref_list[1][0] = h->default_ref_list[1][1];
                    h->default_ref_list[1][1] = temp;
                }

                if(index < h->ref_count[ list ])
                    memset(&h->default_ref_list[list][index], 0, sizeof(Picture)*(h->ref_count[ list ] - index));
            }
        }else{
            int index=0;
            for(i=0; i<h->short_ref_count && index < h->ref_count[0]; i++){
                if(h->short_ref[i]->reference != 3) continue; //FIXME reference field shit
                h->default_ref_list[0][index  ]= *h->short_ref[i];
                h->default_ref_list[0][index++].pic_id= h->short_ref[i]->frame_num;
            }
            for(i=0; i<h->long_ref_count && index < h->ref_count[0]; i++){
                if(h->long_ref[i]->reference != 3) continue;
                h->default_ref_list[0][index  ]= *h->long_ref[i];
                h->default_ref_list[0][index++].pic_id= i;
            }
            if(index < h->ref_count[0])
                memset(&h->default_ref_list[0][index], 0, sizeof(Picture)*(h->ref_count[0] - index));
        }
    }else{ //FIELD
        if(h->slice_type==B_TYPE){

        }else{
            //FIXME second field blah
        }
    }

    return 0;
}

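/**
 * Parses ref_pic_list_reordering() and permutes ref_list[] accordingly.
 */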
static int decode_ref_pic_list_reordering(H264Context *h){
    MpegEncContext * const s = &h->s;
    int list;

    if(h->slice_type==I_TYPE || h->slice_type==SI_TYPE) return 0; //FIXME move before func

    for(list=0; list<2; list++){
        memcpy(h->ref_list[list], h->default_ref_list[list], sizeof(Picture)*h->ref_count[list]);

        if(get_bits1(&s->gb)){
            int pred= h->curr_pic_num;
            int index;

            for(index=0; ; index++){
                int reordering_of_pic_nums_idc= get_ue_golomb(&s->gb);
                int pic_id;
                int i;

                if(reordering_of_pic_nums_idc==3)
                    break;

                if(index >= h->ref_count[list]){
                    av_log(h->s.avctx, AV_LOG_ERROR, "reference count overflow\n");
                    return -1;
                }

                if(reordering_of_pic_nums_idc<3){
                    if(reordering_of_pic_nums_idc<2){
                        const int abs_diff_pic_num= get_ue_golomb(&s->gb) + 1;

                        if(abs_diff_pic_num >= h->max_pic_num){
                            av_log(h->s.avctx, AV_LOG_ERROR, "abs_diff_pic_num overflow\n");
                            return -1;
                        }

                        if(reordering_of_pic_nums_idc == 0) pred-= abs_diff_pic_num;
                        else                                pred+= abs_diff_pic_num;
                        pred &= h->max_pic_num - 1;

                        for(i= h->ref_count[list]-1; i>=index; i--){
                            if(h->ref_list[list][i].pic_id == pred && h->ref_list[list][i].long_ref==0)
                                break;
                        }
                    }else{
                        pic_id= get_ue_golomb(&s->gb); //long_term_pic_idx

                        for(i= h->ref_count[list]-1; i>=index; i--){
                            if(h->ref_list[list][i].pic_id == pic_id && h->ref_list[list][i].long_ref==1)
                                break;
                        }
                    }

                    if(i < index){
                        av_log(h->s.avctx, AV_LOG_ERROR, "reference picture missing during reorder\n");
                        memset(&h->ref_list[list][index], 0, sizeof(Picture)); //FIXME
                    }else if(i > index){
                        Picture tmp= h->ref_list[list][i];
                        for(; i>index; i--){
                            h->ref_list[list][i]= h->ref_list[list][i-1];
                        }
                        h->ref_list[list][index]= tmp;
                    }
                }else{
                    av_log(h->s.avctx, AV_LOG_ERROR, "illegal reordering_of_pic_nums_idc\n");
                    return -1;
                }
            }
        }

        if(h->slice_type!=B_TYPE) break;
    }

    return 0;
}

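/**
 * Parses pred_weight_table() (explicit weighted prediction weights and offsets).
 */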
2669 static int pred_weight_table(H264Context
*h
){
2670 MpegEncContext
* const s
= &h
->s
;
2673 h
->luma_log2_weight_denom
= get_ue_golomb(&s
->gb
);
2674 h
->chroma_log2_weight_denom
= get_ue_golomb(&s
->gb
);
2676 for(list
=0; list
<2; list
++){
2677 for(i
=0; i
<h
->ref_count
[list
]; i
++){
2678 int luma_weight_flag
, chroma_weight_flag
;
2680 luma_weight_flag
= get_bits1(&s
->gb
);
2681 if(luma_weight_flag
){
2682 h
->luma_weight
[list
][i
]= get_se_golomb(&s
->gb
);
2683 h
->luma_offset
[list
][i
]= get_se_golomb(&s
->gb
);
2686 chroma_weight_flag
= get_bits1(&s
->gb
);
2687 if(chroma_weight_flag
){