Fix vismv for H.264: it previously displayed incorrect motion vectors.
[libav.git] / libavcodec / h264.c
1 /*
2 * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
3 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 *
19 */
20
21 /**
22 * @file h264.c
23 * H.264 / AVC / MPEG4 part10 codec.
24 * @author Michael Niedermayer <michaelni@gmx.at>
25 */
26
27 #include "common.h"
28 #include "dsputil.h"
29 #include "avcodec.h"
30 #include "mpegvideo.h"
31 #include "h264data.h"
32 #include "golomb.h"
33
34 #include "cabac.h"
35
36 #undef NDEBUG
37 #include <assert.h>
38
39 #define interlaced_dct interlaced_dct_is_a_bad_name
40 #define mb_intra mb_intra_isnt_initalized_see_mb_type
41
42 #define LUMA_DC_BLOCK_INDEX 25
43 #define CHROMA_DC_BLOCK_INDEX 26
44
45 #define CHROMA_DC_COEFF_TOKEN_VLC_BITS 8
46 #define COEFF_TOKEN_VLC_BITS 8
47 #define TOTAL_ZEROS_VLC_BITS 9
48 #define CHROMA_DC_TOTAL_ZEROS_VLC_BITS 3
49 #define RUN_VLC_BITS 3
50 #define RUN7_VLC_BITS 6
51
52 #define MAX_SPS_COUNT 32
53 #define MAX_PPS_COUNT 256
54
55 #define MAX_MMCO_COUNT 66
56
/**
 * Sequence parameter set (SPS).
 * Field names follow the H.264 syntax element names; where the stored
 * value differs from the coded one, the comment gives the mapping.
 */
typedef struct SPS{

    int profile_idc;                   ///< coded profile indicator
    int level_idc;                     ///< coded level indicator
    int log2_max_frame_num;            ///< log2_max_frame_num_minus4 + 4
    int poc_type;                      ///< pic_order_cnt_type
    int log2_max_poc_lsb;              ///< log2_max_pic_order_cnt_lsb_minus4
    int delta_pic_order_always_zero_flag;
    int offset_for_non_ref_pic;
    int offset_for_top_to_bottom_field;
    int poc_cycle_length;              ///< num_ref_frames_in_pic_order_cnt_cycle
    int ref_frame_count;               ///< num_ref_frames
    int gaps_in_frame_num_allowed_flag;
    int mb_width;                      ///< frame_width_in_mbs_minus1 + 1
    int mb_height;                     ///< frame_height_in_mbs_minus1 + 1
    int frame_mbs_only_flag;
    int mb_aff;                        ///<mb_adaptive_frame_field_flag
    int direct_8x8_inference_flag;
    int crop;                          ///< frame_cropping_flag
    int crop_left;                     ///< frame_cropping_rect_left_offset
    int crop_right;                    ///< frame_cropping_rect_right_offset
    int crop_top;                      ///< frame_cropping_rect_top_offset
    int crop_bottom;                   ///< frame_cropping_rect_bottom_offset
    int vui_parameters_present_flag;
    AVRational sar;                    ///< sample aspect ratio (from VUI)
    int timing_info_present_flag;
    uint32_t num_units_in_tick;
    uint32_t time_scale;
    int fixed_frame_rate_flag;
    short offset_for_ref_frame[256];   //FIXME dyn aloc?
}SPS;
91
/**
 * Picture parameter set (PPS).
 * Field names follow the H.264 syntax element names.
 */
typedef struct PPS{
    int sps_id;                        ///< id of the SPS this PPS refers to
    int cabac;                         ///< entropy_coding_mode_flag
    int pic_order_present;             ///< pic_order_present_flag
    int slice_group_count;             ///< num_slice_groups_minus1 + 1
    int mb_slice_group_map_type;
    int ref_count[2];                  ///< num_ref_idx_l0/1_active_minus1 + 1
    int weighted_pred;                 ///< weighted_pred_flag
    int weighted_bipred_idc;
    int init_qp;                       ///< pic_init_qp_minus26 + 26
    int init_qs;                       ///< pic_init_qs_minus26 + 26
    int chroma_qp_index_offset;
    int deblocking_filter_parameters_present; ///< deblocking_filter_parameters_present_flag
    int constrained_intra_pred;        ///< constrained_intra_pred_flag
    int redundant_pic_cnt_present;     ///< redundant_pic_cnt_present_flag
}PPS;
111
/**
 * Memory management control operation opcode.
 * Values match the memory_management_control_operation codes of the
 * H.264 spec (names describe the intended ref-picture marking action).
 */
typedef enum MMCOOpcode{
    MMCO_END=0,         ///< end of MMCO list
    MMCO_SHORT2UNUSED,  ///< mark a short-term reference as unused
    MMCO_LONG2UNUSED,   ///< mark a long-term reference as unused
    MMCO_SHORT2LONG,    ///< convert a short-term reference to long-term
    MMCO_SET_MAX_LONG,  ///< set max long-term frame index
    MMCO_RESET,         ///< mark all references as unused
    MMCO_LONG,          ///< assign a long-term index to the current pic
} MMCOOpcode;
124
/**
 * Memory management control operation (one decoded MMCO entry).
 */
typedef struct MMCO{
    MMCOOpcode opcode;  ///< which marking operation to perform
    int short_frame_num;///< frame_num of the targeted short-term ref
    int long_index;     ///< long-term index operand
} MMCO;
133
/**
 * H264Context
 * Per-decoder state; embeds MpegEncContext as its first member so it can
 * be used where an MpegEncContext* is expected.
 */
typedef struct H264Context{
    MpegEncContext s;
    int nal_ref_idc;    ///< nal_ref_idc of the current NAL unit
    int nal_unit_type;  ///< one of the NAL_* values below
#define NAL_SLICE 1
#define NAL_DPA 2
#define NAL_DPB 3
#define NAL_DPC 4
#define NAL_IDR_SLICE 5
#define NAL_SEI 6
#define NAL_SPS 7
#define NAL_PPS 8
#define NAL_PICTURE_DELIMITER 9
#define NAL_FILTER_DATA 10
    uint8_t *rbsp_buffer;   ///< buffer holding the unescaped NAL payload
    int rbsp_buffer_size;

    /**
     * Used to parse AVC variant of h264
     */
    int is_avc; ///< this flag is != 0 if codec is avc1
    int got_avcC; ///< flag used to parse avcC data only once
    int nal_length_size; ///< Number of bytes used for nal length (1, 2 or 4)

    int chroma_qp; //QPc

    int prev_mb_skiped; //FIXME remove (IMHO not used)

    //prediction stuff
    int chroma_pred_mode;
    int intra16x16_pred_mode;

    // caches are laid out as a 8-wide scratch grid addressed via scan8[]
    int8_t intra4x4_pred_mode_cache[5*8];
    int8_t (*intra4x4_pred_mode)[8];
    void (*pred4x4 [9+3])(uint8_t *src, uint8_t *topright, int stride);//FIXME move to dsp?
    void (*pred8x8 [4+3])(uint8_t *src, int stride);
    void (*pred16x16[4+3])(uint8_t *src, int stride);
    // bitmasks of which neighbouring samples are usable for intra pred
    unsigned int topleft_samples_available;
    unsigned int top_samples_available;
    unsigned int topright_samples_available;
    unsigned int left_samples_available;
    uint8_t (*top_border)[16+2*8];
    uint8_t left_border[17+2*9];

    /**
     * non zero coeff count cache.
     * is 64 if not available.
     */
    uint8_t non_zero_count_cache[6*8];
    uint8_t (*non_zero_count)[16];

    /**
     * Motion vector cache.
     */
    int16_t mv_cache[2][5*8][2];
    int8_t ref_cache[2][5*8];
#define LIST_NOT_USED -1 //FIXME rename?
#define PART_NOT_AVAILABLE -2

    /**
     * is 1 if the specific list MV&references are set to 0,0,-2.
     */
    int mv_cache_clean[2];

    int block_offset[16+8];
    int chroma_subblock_offset[16]; //FIXME remove

    // macroblock index -> motion-vector/ref-index array offsets
    uint16_t *mb2b_xy; //FIXME are these 4 a good idea?
    uint16_t *mb2b8_xy;
    int b_stride; //FIXME use s->b4_stride
    int b8_stride;

    int halfpel_flag;
    int thirdpel_flag;

    int unknown_svq3_flag;
    int next_slice_index;

    SPS sps_buffer[MAX_SPS_COUNT];
    SPS sps; ///< current sps

    PPS pps_buffer[MAX_PPS_COUNT];
    /**
     * current pps
     */
    PPS pps; //FIXME move tp Picture perhaps? (->no) do we need that?

    int slice_num;
    uint8_t *slice_table_base;
    uint8_t *slice_table; ///< slice_table_base + mb_stride + 1
    int slice_type;
    int slice_type_fixed;

    //interlacing specific flags
    int mb_field_decoding_flag;

    int sub_mb_type[4];

    //POC stuff
    int poc_lsb;
    int poc_msb;
    int delta_poc_bottom;
    int delta_poc[2];
    int frame_num;
    int prev_poc_msb; ///< poc_msb of the last reference pic for POC type 0
    int prev_poc_lsb; ///< poc_lsb of the last reference pic for POC type 0
    int frame_num_offset; ///< for POC type 2
    int prev_frame_num_offset; ///< for POC type 2
    int prev_frame_num; ///< frame_num of the last pic for POC type 1/2

    /**
     * frame_num for frames or 2*frame_num for field pics.
     */
    int curr_pic_num;

    /**
     * max_frame_num or 2*max_frame_num for field pics.
     */
    int max_pic_num;

    //Weighted pred stuff
    int luma_log2_weight_denom;
    int chroma_log2_weight_denom;
    int luma_weight[2][16];
    int luma_offset[2][16];
    int chroma_weight[2][16][2];
    int chroma_offset[2][16][2];

    //deblock
    int deblocking_filter; ///< disable_deblocking_filter_idc with 1<->0
    int slice_alpha_c0_offset;
    int slice_beta_offset;

    int redundant_pic_count;

    int direct_spatial_mv_pred;

    /**
     * num_ref_idx_l0/1_active_minus1 + 1
     */
    int ref_count[2];// FIXME split for AFF
    Picture *short_ref[16];
    Picture *long_ref[16];
    Picture default_ref_list[2][32];
    Picture ref_list[2][32]; //FIXME size?
    Picture field_ref_list[2][32]; //FIXME size?

    /**
     * memory management control operations buffer.
     */
    MMCO mmco[MAX_MMCO_COUNT];
    int mmco_index;

    int long_ref_count; ///< number of actual long term references
    int short_ref_count; ///< number of actual short term references

    //data partitioning
    GetBitContext intra_gb;
    GetBitContext inter_gb;
    GetBitContext *intra_gb_ptr;
    GetBitContext *inter_gb_ptr;

    DCTELEM mb[16*24] __align8; ///< per-MB coefficients: 24 blocks of 16

    /**
     * Cabac
     */
    CABACContext cabac;
    uint8_t cabac_state[399];
    int cabac_init_idc;

    /* 0x100 -> non null luma_dc, 0x80/0x40 -> non null chroma_dc (cb/cr), 0x?0 -> chroma_cbp(0,1,2), 0x0? luma_cbp */
    uint16_t *cbp_table;
    int top_cbp;
    int left_cbp;
    /* chroma_pred_mode for i4x4 or i16x16, else 0 */
    uint8_t *chroma_pred_mode_table;
    int last_qscale_diff;
    int16_t (*mvd_table[2])[2];
    int16_t mvd_cache[2][5*8][2];

}H264Context;
319
/* VLC lookup tables for CAVLC residual decoding (built at init time) */
static VLC coeff_token_vlc[4];
static VLC chroma_dc_coeff_token_vlc;

static VLC total_zeros_vlc[15];
static VLC chroma_dc_total_zeros_vlc[3];

static VLC run_vlc[6];
static VLC run7_vlc;

/* forward declarations */
static void svq3_luma_dc_dequant_idct_c(DCTELEM *block, int qp);
static void svq3_add_idct_c(uint8_t *dst, DCTELEM *block, int stride, int qp, int dc);
static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr);
332
/**
 * Packs two 16-bit values (e.g. an MV x/y pair) into one uint32_t so the
 * pair can be copied with a single 32-bit store; the byte order matches
 * the in-memory layout of two consecutive int16_t on the host.
 *
 * The shifts are done in unsigned arithmetic: the original `a<<16` /
 * `b<<16` left-shifted a possibly negative int, which is undefined
 * behavior in C (C99 6.5.7). The produced bit pattern is unchanged.
 */
static inline uint32_t pack16to32(int a, int b){
#ifdef WORDS_BIGENDIAN
    return (b&0xFFFF) | ((uint32_t)a<<16);
#else
    return (a&0xFFFF) | ((uint32_t)b<<16);
#endif
}
340
/**
 * fill a rectangle.
 * The value is replicated across each element; only the (w,h) shapes the
 * decoder actually uses are supported, anything else asserts.
 * @param h height of the rectangle, should be a constant
 * @param w width of the rectangle, should be a constant
 * @param size the size of val (1 or 4), should be a constant
 */
static inline void fill_rectangle(void *vp, int w, int h, int stride, uint32_t val, int size){ //FIXME ensure this IS inlined
    uint8_t *p= (uint8_t*)vp;
    uint16_t v2;
    uint32_t v4;
    uint64_t v8;
    int y;

    assert(size==1 || size==4);

    w      *= size;
    stride *= size;

    /* replicate val to each store width once, instead of per branch */
    v2= size==4 ? val : val*0x0101;
    v4= size==4 ? val : val*0x01010101;
    v8= size==4 ? val*0x0100000001ULL : val*0x0101010101010101ULL;

    if(w==2 && (h==2 || h==4)){
        for(y=0; y<h; y++)
            *(uint16_t*)(p + y*stride)= v2;
    }else if(w==4 && (h==1 || h==2 || h==4)){
        for(y=0; y<h; y++)
            *(uint32_t*)(p + y*stride)= v4;
    }else if(w==8 && (h==1 || h==2)){
        /* two 32-bit stores per row (matches the original's access width) */
        for(y=0; y<h; y++){
            *(uint32_t*)(p + 0 + y*stride)=
            *(uint32_t*)(p + 4 + y*stride)= v4;
        }
    }else if(w==8 && h==4){
        for(y=0; y<4; y++)
            *(uint64_t*)(p + y*stride)= v8;
    }else if(w==16 && (h==2 || h==4)){
        for(y=0; y<h; y++){
            *(uint64_t*)(p + 0 + y*stride)=
            *(uint64_t*)(p + 8 + y*stride)= v8;
        }
    }else
        assert(0);
}
403
/**
 * Fills the per-macroblock neighbour caches (intra4x4 prediction modes,
 * non-zero coefficient counts, motion vectors, reference indices and,
 * when CABAC is used, motion vector differences) from the stored state
 * of the already-decoded neighbouring macroblocks, so the rest of the
 * MB decode can address everything through the scan8[] scratch grid.
 * @param mb_type macroblock type of the current MB (IS_* macros apply)
 */
static inline void fill_caches(H264Context *h, int mb_type){
    MpegEncContext * const s = &h->s;
    const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
    int topleft_xy, top_xy, topright_xy, left_xy[2];
    int topleft_type, top_type, topright_type, left_type[2];
    int left_block[4];
    int i;

    //wow what a mess, why didnt they simplify the interlacing&intra stuff, i cant imagine that these complex rules are worth it

    /* neighbour MB addresses; the MBAFF path is a stub */
    if(h->sps.mb_aff){
        //FIXME
        /* NOTE(review): left_xy[] and left_block[] are NOT initialized on
         * this path — presumably unfinished (see FIXME); confirm MBAFF
         * streams never reach here. */
        topleft_xy = 0; /* avoid warning */
        top_xy = 0; /* avoid warning */
        topright_xy = 0; /* avoid warning */
    }else{
        topleft_xy = mb_xy-1 - s->mb_stride;
        top_xy = mb_xy - s->mb_stride;
        topright_xy= mb_xy+1 - s->mb_stride;
        left_xy[0] = mb_xy-1;
        left_xy[1] = mb_xy-1;
        left_block[0]= 0;
        left_block[1]= 1;
        left_block[2]= 2;
        left_block[3]= 3;
    }

    /* a neighbour only counts if it belongs to the same slice */
    topleft_type = h->slice_table[topleft_xy ] == h->slice_num ? s->current_picture.mb_type[topleft_xy] : 0;
    top_type = h->slice_table[top_xy ] == h->slice_num ? s->current_picture.mb_type[top_xy] : 0;
    topright_type= h->slice_table[topright_xy] == h->slice_num ? s->current_picture.mb_type[topright_xy]: 0;
    left_type[0] = h->slice_table[left_xy[0] ] == h->slice_num ? s->current_picture.mb_type[left_xy[0]] : 0;
    left_type[1] = h->slice_table[left_xy[1] ] == h->slice_num ? s->current_picture.mb_type[left_xy[1]] : 0;

    if(IS_INTRA(mb_type)){
        /* start from "everything available" and knock out bits for each
         * missing/unusable neighbour (masks are per-4x4-block bitmaps) */
        h->topleft_samples_available=
        h->top_samples_available=
        h->left_samples_available= 0xFFFF;
        h->topright_samples_available= 0xEEEA;

        if(!IS_INTRA(top_type) && (top_type==0 || h->pps.constrained_intra_pred)){
            h->topleft_samples_available= 0xB3FF;
            h->top_samples_available= 0x33FF;
            h->topright_samples_available= 0x26EA;
        }
        for(i=0; i<2; i++){
            if(!IS_INTRA(left_type[i]) && (left_type[i]==0 || h->pps.constrained_intra_pred)){
                h->topleft_samples_available&= 0xDF5F;
                h->left_samples_available&= 0x5F5F;
            }
        }

        if(!IS_INTRA(topleft_type) && (topleft_type==0 || h->pps.constrained_intra_pred))
            h->topleft_samples_available&= 0x7FFF;

        if(!IS_INTRA(topright_type) && (topright_type==0 || h->pps.constrained_intra_pred))
            h->topright_samples_available&= 0xFBFF;

        if(IS_INTRA4x4(mb_type)){
            if(IS_INTRA4x4(top_type)){
                /* bottom row of the top MB; stored order puts the last
                 * column's mode at index 3 (see write_back_intra_pred_mode) */
                h->intra4x4_pred_mode_cache[4+8*0]= h->intra4x4_pred_mode[top_xy][4];
                h->intra4x4_pred_mode_cache[5+8*0]= h->intra4x4_pred_mode[top_xy][5];
                h->intra4x4_pred_mode_cache[6+8*0]= h->intra4x4_pred_mode[top_xy][6];
                h->intra4x4_pred_mode_cache[7+8*0]= h->intra4x4_pred_mode[top_xy][3];
            }else{
                int pred;
                /* 2 == DC prediction assumed for non-intra4x4 neighbours */
                if(IS_INTRA16x16(top_type) || (IS_INTER(top_type) && !h->pps.constrained_intra_pred))
                    pred= 2;
                else{
                    pred= -1;
                }
                h->intra4x4_pred_mode_cache[4+8*0]=
                h->intra4x4_pred_mode_cache[5+8*0]=
                h->intra4x4_pred_mode_cache[6+8*0]=
                h->intra4x4_pred_mode_cache[7+8*0]= pred;
            }
            for(i=0; i<2; i++){
                if(IS_INTRA4x4(left_type[i])){
                    h->intra4x4_pred_mode_cache[3+8*1 + 2*8*i]= h->intra4x4_pred_mode[left_xy[i]][left_block[0+2*i]];
                    h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= h->intra4x4_pred_mode[left_xy[i]][left_block[1+2*i]];
                }else{
                    int pred;
                    if(IS_INTRA16x16(left_type[i]) || (IS_INTER(left_type[i]) && !h->pps.constrained_intra_pred))
                        pred= 2;
                    else{
                        pred= -1;
                    }
                    h->intra4x4_pred_mode_cache[3+8*1 + 2*8*i]=
                    h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= pred;
                }
            }
        }
    }


/*
0 . T T. T T T T
1 L . .L . . . .
2 L . .L . . . .
3 . T TL . . . .
4 L . .L . . . .
5 L . .. . . . .
*/
//FIXME constraint_intra_pred & partitioning & nnz (lets hope this is just a typo in the spec)
    /* non-zero coefficient counts from the top / left neighbours;
     * unavailable entries become 64 (or 0 for CABAC inter, per spec) */
    if(top_type){
        h->non_zero_count_cache[4+8*0]= h->non_zero_count[top_xy][0];
        h->non_zero_count_cache[5+8*0]= h->non_zero_count[top_xy][1];
        h->non_zero_count_cache[6+8*0]= h->non_zero_count[top_xy][2];
        h->non_zero_count_cache[7+8*0]= h->non_zero_count[top_xy][3];

        h->non_zero_count_cache[1+8*0]= h->non_zero_count[top_xy][7];
        h->non_zero_count_cache[2+8*0]= h->non_zero_count[top_xy][8];

        h->non_zero_count_cache[1+8*3]= h->non_zero_count[top_xy][10];
        h->non_zero_count_cache[2+8*3]= h->non_zero_count[top_xy][11];

        h->top_cbp= h->cbp_table[top_xy];
    }else{
        h->non_zero_count_cache[4+8*0]=
        h->non_zero_count_cache[5+8*0]=
        h->non_zero_count_cache[6+8*0]=
        h->non_zero_count_cache[7+8*0]=

        h->non_zero_count_cache[1+8*0]=
        h->non_zero_count_cache[2+8*0]=

        h->non_zero_count_cache[1+8*3]=
        h->non_zero_count_cache[2+8*3]= h->pps.cabac && !IS_INTRA(mb_type) ? 0 : 64;

        if(IS_INTRA(mb_type)) h->top_cbp= 0x1C0;
        else h->top_cbp= 0;
    }

    if(left_type[0]){
        h->non_zero_count_cache[3+8*1]= h->non_zero_count[left_xy[0]][6];
        h->non_zero_count_cache[3+8*2]= h->non_zero_count[left_xy[0]][5];
        h->non_zero_count_cache[0+8*1]= h->non_zero_count[left_xy[0]][9]; //FIXME left_block
        h->non_zero_count_cache[0+8*4]= h->non_zero_count[left_xy[0]][12];
        h->left_cbp= h->cbp_table[left_xy[0]]; //FIXME interlacing
    }else{
        h->non_zero_count_cache[3+8*1]=
        h->non_zero_count_cache[3+8*2]=
        h->non_zero_count_cache[0+8*1]=
        h->non_zero_count_cache[0+8*4]= h->pps.cabac && !IS_INTRA(mb_type) ? 0 : 64;

        if(IS_INTRA(mb_type)) h->left_cbp= 0x1C0;//FIXME interlacing
        else h->left_cbp= 0;
    }

    if(left_type[1]){
        h->non_zero_count_cache[3+8*3]= h->non_zero_count[left_xy[1]][4];
        h->non_zero_count_cache[3+8*4]= h->non_zero_count[left_xy[1]][3];
        h->non_zero_count_cache[0+8*2]= h->non_zero_count[left_xy[1]][8];
        h->non_zero_count_cache[0+8*5]= h->non_zero_count[left_xy[1]][11];
    }else{
        h->non_zero_count_cache[3+8*3]=
        h->non_zero_count_cache[3+8*4]=
        h->non_zero_count_cache[0+8*2]=
        h->non_zero_count_cache[0+8*5]= h->pps.cabac && !IS_INTRA(mb_type) ? 0 : 64;
    }

#if 1
    /* motion vector / reference index caches for inter MBs */
    if(IS_INTER(mb_type)){
        int list;
        for(list=0; list<2; list++){
            if((!IS_8X8(mb_type)) && !USES_LIST(mb_type, list)){
                /*if(!h->mv_cache_clean[list]){
                    memset(h->mv_cache [list],  0, 8*5*2*sizeof(int16_t)); //FIXME clean only input? clean at all?
                    memset(h->ref_cache[list], PART_NOT_AVAILABLE, 8*5*sizeof(int8_t));
                    h->mv_cache_clean[list]= 1;
                }*/
                continue; //FIXME direct mode ...
            }
            h->mv_cache_clean[list]= 0;

            if(IS_INTER(topleft_type)){
                const int b_xy = h->mb2b_xy[topleft_xy] + 3 + 3*h->b_stride;
                const int b8_xy= h->mb2b8_xy[topleft_xy] + 1 + h->b8_stride;
                *(uint32_t*)h->mv_cache[list][scan8[0] - 1 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy];
                h->ref_cache[list][scan8[0] - 1 - 1*8]= s->current_picture.ref_index[list][b8_xy];
            }else{
                *(uint32_t*)h->mv_cache[list][scan8[0] - 1 - 1*8]= 0;
                h->ref_cache[list][scan8[0] - 1 - 1*8]= topleft_type ? LIST_NOT_USED : PART_NOT_AVAILABLE;
            }

            if(IS_INTER(top_type)){
                const int b_xy= h->mb2b_xy[top_xy] + 3*h->b_stride;
                const int b8_xy= h->mb2b8_xy[top_xy] + h->b8_stride;
                *(uint32_t*)h->mv_cache[list][scan8[0] + 0 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 0];
                *(uint32_t*)h->mv_cache[list][scan8[0] + 1 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 1];
                *(uint32_t*)h->mv_cache[list][scan8[0] + 2 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 2];
                *(uint32_t*)h->mv_cache[list][scan8[0] + 3 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 3];
                h->ref_cache[list][scan8[0] + 0 - 1*8]=
                h->ref_cache[list][scan8[0] + 1 - 1*8]= s->current_picture.ref_index[list][b8_xy + 0];
                h->ref_cache[list][scan8[0] + 2 - 1*8]=
                h->ref_cache[list][scan8[0] + 3 - 1*8]= s->current_picture.ref_index[list][b8_xy + 1];
            }else{
                *(uint32_t*)h->mv_cache [list][scan8[0] + 0 - 1*8]=
                *(uint32_t*)h->mv_cache [list][scan8[0] + 1 - 1*8]=
                *(uint32_t*)h->mv_cache [list][scan8[0] + 2 - 1*8]=
                *(uint32_t*)h->mv_cache [list][scan8[0] + 3 - 1*8]= 0;
                *(uint32_t*)&h->ref_cache[list][scan8[0] + 0 - 1*8]= ((top_type ? LIST_NOT_USED : PART_NOT_AVAILABLE)&0xFF)*0x01010101;
            }

            if(IS_INTER(topright_type)){
                const int b_xy= h->mb2b_xy[topright_xy] + 3*h->b_stride;
                const int b8_xy= h->mb2b8_xy[topright_xy] + h->b8_stride;
                *(uint32_t*)h->mv_cache[list][scan8[0] + 4 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy];
                h->ref_cache[list][scan8[0] + 4 - 1*8]= s->current_picture.ref_index[list][b8_xy];
            }else{
                *(uint32_t*)h->mv_cache [list][scan8[0] + 4 - 1*8]= 0;
                h->ref_cache[list][scan8[0] + 4 - 1*8]= topright_type ? LIST_NOT_USED : PART_NOT_AVAILABLE;
            }

            //FIXME unify cleanup or sth
            if(IS_INTER(left_type[0])){
                const int b_xy= h->mb2b_xy[left_xy[0]] + 3;
                const int b8_xy= h->mb2b8_xy[left_xy[0]] + 1;
                *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 0*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[0]];
                *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[1]];
                h->ref_cache[list][scan8[0] - 1 + 0*8]=
                h->ref_cache[list][scan8[0] - 1 + 1*8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[0]>>1)];
            }else{
                *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 0*8]=
                *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 1*8]= 0;
                h->ref_cache[list][scan8[0] - 1 + 0*8]=
                h->ref_cache[list][scan8[0] - 1 + 1*8]= left_type[0] ? LIST_NOT_USED : PART_NOT_AVAILABLE;
            }

            if(IS_INTER(left_type[1])){
                const int b_xy= h->mb2b_xy[left_xy[1]] + 3;
                const int b8_xy= h->mb2b8_xy[left_xy[1]] + 1;
                *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 2*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[2]];
                *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 3*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[3]];
                h->ref_cache[list][scan8[0] - 1 + 2*8]=
                h->ref_cache[list][scan8[0] - 1 + 3*8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[2]>>1)];
            }else{
                *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 2*8]=
                *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 3*8]= 0;
                /* NOTE(review): tests left_type[0], not [1] — looks like a
                 * copy-paste slip; harmless while left_xy[0]==left_xy[1]
                 * (the non-MBAFF path), but confirm before enabling MBAFF. */
                h->ref_cache[list][scan8[0] - 1 + 2*8]=
                h->ref_cache[list][scan8[0] - 1 + 3*8]= left_type[0] ? LIST_NOT_USED : PART_NOT_AVAILABLE;
            }

            h->ref_cache[list][scan8[5 ]+1] =
            h->ref_cache[list][scan8[7 ]+1] =
            h->ref_cache[list][scan8[13]+1] = //FIXME remove past 3 (init somewher else)
            h->ref_cache[list][scan8[4 ]] =
            h->ref_cache[list][scan8[12]] = PART_NOT_AVAILABLE;
            *(uint32_t*)h->mv_cache [list][scan8[5 ]+1]=
            *(uint32_t*)h->mv_cache [list][scan8[7 ]+1]=
            *(uint32_t*)h->mv_cache [list][scan8[13]+1]= //FIXME remove past 3 (init somewher else)
            *(uint32_t*)h->mv_cache [list][scan8[4 ]]=
            *(uint32_t*)h->mv_cache [list][scan8[12]]= 0;

            if( h->pps.cabac ) {
                /* XXX beurk, Load mvd */
                if(IS_INTER(topleft_type)){
                    const int b_xy = h->mb2b_xy[topleft_xy] + 3 + 3*h->b_stride;
                    *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy];
                }else{
                    *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 - 1*8]= 0;
                }

                if(IS_INTER(top_type)){
                    const int b_xy= h->mb2b_xy[top_xy] + 3*h->b_stride;
                    *(uint32_t*)h->mvd_cache[list][scan8[0] + 0 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 0];
                    *(uint32_t*)h->mvd_cache[list][scan8[0] + 1 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 1];
                    *(uint32_t*)h->mvd_cache[list][scan8[0] + 2 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 2];
                    *(uint32_t*)h->mvd_cache[list][scan8[0] + 3 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 3];
                }else{
                    *(uint32_t*)h->mvd_cache [list][scan8[0] + 0 - 1*8]=
                    *(uint32_t*)h->mvd_cache [list][scan8[0] + 1 - 1*8]=
                    *(uint32_t*)h->mvd_cache [list][scan8[0] + 2 - 1*8]=
                    *(uint32_t*)h->mvd_cache [list][scan8[0] + 3 - 1*8]= 0;
                }
                if(IS_INTER(left_type[0])){
                    const int b_xy= h->mb2b_xy[left_xy[0]] + 3;
                    *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 0*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[0]];
                    *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[1]];
                }else{
                    *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 0*8]=
                    *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 1*8]= 0;
                }
                if(IS_INTER(left_type[1])){
                    const int b_xy= h->mb2b_xy[left_xy[1]] + 3;
                    *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 2*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[2]];
                    *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 3*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[3]];
                }else{
                    *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 2*8]=
                    *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 3*8]= 0;
                }
                *(uint32_t*)h->mvd_cache [list][scan8[5 ]+1]=
                *(uint32_t*)h->mvd_cache [list][scan8[7 ]+1]=
                *(uint32_t*)h->mvd_cache [list][scan8[13]+1]= //FIXME remove past 3 (init somewher else)
                *(uint32_t*)h->mvd_cache [list][scan8[4 ]]=
                *(uint32_t*)h->mvd_cache [list][scan8[12]]= 0;
            }
        }
        //FIXME
    }
#endif
}
705
706 static inline void write_back_intra_pred_mode(H264Context *h){
707 MpegEncContext * const s = &h->s;
708 const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
709
710 h->intra4x4_pred_mode[mb_xy][0]= h->intra4x4_pred_mode_cache[7+8*1];
711 h->intra4x4_pred_mode[mb_xy][1]= h->intra4x4_pred_mode_cache[7+8*2];
712 h->intra4x4_pred_mode[mb_xy][2]= h->intra4x4_pred_mode_cache[7+8*3];
713 h->intra4x4_pred_mode[mb_xy][3]= h->intra4x4_pred_mode_cache[7+8*4];
714 h->intra4x4_pred_mode[mb_xy][4]= h->intra4x4_pred_mode_cache[4+8*4];
715 h->intra4x4_pred_mode[mb_xy][5]= h->intra4x4_pred_mode_cache[5+8*4];
716 h->intra4x4_pred_mode[mb_xy][6]= h->intra4x4_pred_mode_cache[6+8*4];
717 }
718
719 /**
720 * checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks.
721 */
722 static inline int check_intra4x4_pred_mode(H264Context *h){
723 MpegEncContext * const s = &h->s;
724 static const int8_t top [12]= {-1, 0,LEFT_DC_PRED,-1,-1,-1,-1,-1, 0};
725 static const int8_t left[12]= { 0,-1, TOP_DC_PRED, 0,-1,-1,-1, 0,-1,DC_128_PRED};
726 int i;
727
728 if(!(h->top_samples_available&0x8000)){
729 for(i=0; i<4; i++){
730 int status= top[ h->intra4x4_pred_mode_cache[scan8[0] + i] ];
731 if(status<0){
732 av_log(h->s.avctx, AV_LOG_ERROR, "top block unavailable for requested intra4x4 mode %d at %d %d\n", status, s->mb_x, s->mb_y);
733 return -1;
734 } else if(status){
735 h->intra4x4_pred_mode_cache[scan8[0] + i]= status;
736 }
737 }
738 }
739
740 if(!(h->left_samples_available&0x8000)){
741 for(i=0; i<4; i++){
742 int status= left[ h->intra4x4_pred_mode_cache[scan8[0] + 8*i] ];
743 if(status<0){
744 av_log(h->s.avctx, AV_LOG_ERROR, "left block unavailable for requested intra4x4 mode %d at %d %d\n", status, s->mb_x, s->mb_y);
745 return -1;
746 } else if(status){
747 h->intra4x4_pred_mode_cache[scan8[0] + 8*i]= status;
748 }
749 }
750 }
751
752 return 0;
753 } //FIXME cleanup like next
754
755 /**
756 * checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks.
757 */
758 static inline int check_intra_pred_mode(H264Context *h, int mode){
759 MpegEncContext * const s = &h->s;
760 static const int8_t top [7]= {LEFT_DC_PRED8x8, 1,-1,-1};
761 static const int8_t left[7]= { TOP_DC_PRED8x8,-1, 2,-1,DC_128_PRED8x8};
762
763 if(mode < 0 || mode > 6) {
764 av_log(h->s.avctx, AV_LOG_ERROR, "out of range intra chroma pred mode at %d %d\n", s->mb_x, s->mb_y);
765 return -1;
766 }
767
768 if(!(h->top_samples_available&0x8000)){
769 mode= top[ mode ];
770 if(mode<0){
771 av_log(h->s.avctx, AV_LOG_ERROR, "top block unavailable for requested intra mode at %d %d\n", s->mb_x, s->mb_y);
772 return -1;
773 }
774 }
775
776 if(!(h->left_samples_available&0x8000)){
777 mode= left[ mode ];
778 if(mode<0){
779 av_log(h->s.avctx, AV_LOG_ERROR, "left block unavailable for requested intra mode at %d %d\n", s->mb_x, s->mb_y);
780 return -1;
781 }
782 }
783
784 return mode;
785 }
786
787 /**
788 * gets the predicted intra4x4 prediction mode.
789 */
790 static inline int pred_intra_mode(H264Context *h, int n){
791 const int index8= scan8[n];
792 const int left= h->intra4x4_pred_mode_cache[index8 - 1];
793 const int top = h->intra4x4_pred_mode_cache[index8 - 8];
794 const int min= FFMIN(left, top);
795
796 tprintf("mode:%d %d min:%d\n", left ,top, min);
797
798 if(min<0) return DC_PRED;
799 else return min;
800 }
801
802 static inline void write_back_non_zero_count(H264Context *h){
803 MpegEncContext * const s = &h->s;
804 const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
805
806 h->non_zero_count[mb_xy][0]= h->non_zero_count_cache[4+8*4];
807 h->non_zero_count[mb_xy][1]= h->non_zero_count_cache[5+8*4];
808 h->non_zero_count[mb_xy][2]= h->non_zero_count_cache[6+8*4];
809 h->non_zero_count[mb_xy][3]= h->non_zero_count_cache[7+8*4];
810 h->non_zero_count[mb_xy][4]= h->non_zero_count_cache[7+8*3];
811 h->non_zero_count[mb_xy][5]= h->non_zero_count_cache[7+8*2];
812 h->non_zero_count[mb_xy][6]= h->non_zero_count_cache[7+8*1];
813
814 h->non_zero_count[mb_xy][7]= h->non_zero_count_cache[1+8*2];
815 h->non_zero_count[mb_xy][8]= h->non_zero_count_cache[2+8*2];
816 h->non_zero_count[mb_xy][9]= h->non_zero_count_cache[2+8*1];
817
818 h->non_zero_count[mb_xy][10]=h->non_zero_count_cache[1+8*5];
819 h->non_zero_count[mb_xy][11]=h->non_zero_count_cache[2+8*5];
820 h->non_zero_count[mb_xy][12]=h->non_zero_count_cache[2+8*4];
821 }
822
823 /**
824 * gets the predicted number of non zero coefficients.
825 * @param n block index
826 */
827 static inline int pred_non_zero_count(H264Context *h, int n){
828 const int index8= scan8[n];
829 const int left= h->non_zero_count_cache[index8 - 1];
830 const int top = h->non_zero_count_cache[index8 - 8];
831 int i= left + top;
832
833 if(i<64) i= (i+1)>>1;
834
835 tprintf("pred_nnz L%X T%X n%d s%d P%X\n", left, top, n, scan8[n], i&31);
836
837 return i&31;
838 }
839
840 static inline int fetch_diagonal_mv(H264Context *h, const int16_t **C, int i, int list, int part_width){
841 const int topright_ref= h->ref_cache[list][ i - 8 + part_width ];
842
843 if(topright_ref != PART_NOT_AVAILABLE){
844 *C= h->mv_cache[list][ i - 8 + part_width ];
845 return topright_ref;
846 }else{
847 tprintf("topright MV not available\n");
848
849 *C= h->mv_cache[list][ i - 8 - 1 ];
850 return h->ref_cache[list][ i - 8 - 1 ];
851 }
852 }
853
/**
 * gets the predicted MV (median prediction from the left, top and
 * top-right/top-left neighbours).
 * @param n the block index
 * @param part_width the width of the partition (4, 8,16) -> (1, 2, 4)
 * @param list reference picture list the prediction is for
 * @param ref reference index used by the current partition
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
static inline void pred_motion(H264Context * const h, int n, int part_width, int list, int ref, int * const mx, int * const my){
    const int index8= scan8[n];
    const int top_ref= h->ref_cache[list][ index8 - 8 ];
    const int left_ref= h->ref_cache[list][ index8 - 1 ];
    const int16_t * const A= h->mv_cache[list][ index8 - 1 ];   // left neighbour MV
    const int16_t * const B= h->mv_cache[list][ index8 - 8 ];   // top neighbour MV
    const int16_t * C;                                          // diagonal neighbour MV
    int diagonal_ref, match_count;

    assert(part_width==1 || part_width==2 || part_width==4);

/* mv_cache
  B . . A T T T T
  U . . L . . , .
  U . . L . . . .
  U . . L . . , .
  . . . L . . . .
*/

    diagonal_ref= fetch_diagonal_mv(h, &C, index8, list, part_width);
    match_count= (diagonal_ref==ref) + (top_ref==ref) + (left_ref==ref);
    if(match_count > 1){ //most common
        *mx= mid_pred(A[0], B[0], C[0]);
        *my= mid_pred(A[1], B[1], C[1]);
    }else if(match_count==1){
        /* exactly one neighbour uses the wanted reference: take its MV as-is */
        if(left_ref==ref){
            *mx= A[0];
            *my= A[1];
        }else if(top_ref==ref){
            *mx= B[0];
            *my= B[1];
        }else{
            *mx= C[0];
            *my= C[1];
        }
    }else{
        /* no single match: if only the left neighbour is available use it,
           otherwise fall back to the component-wise median */
        if(top_ref == PART_NOT_AVAILABLE && diagonal_ref == PART_NOT_AVAILABLE && left_ref != PART_NOT_AVAILABLE){
            *mx= A[0];
            *my= A[1];
        }else{
            *mx= mid_pred(A[0], B[0], C[0]);
            *my= mid_pred(A[1], B[1], C[1]);
        }
    }

    tprintf("pred_motion (%2d %2d %2d) (%2d %2d %2d) (%2d %2d %2d) -> (%2d %2d %2d) at %2d %2d %d list %d\n", top_ref, B[0], B[1], diagonal_ref, C[0], C[1], left_ref, A[0], A[1], ref, *mx, *my, h->s.mb_x, h->s.mb_y, n, list);
}
908
909 /**
910 * gets the directionally predicted 16x8 MV.
911 * @param n the block index
912 * @param mx the x component of the predicted motion vector
913 * @param my the y component of the predicted motion vector
914 */
915 static inline void pred_16x8_motion(H264Context * const h, int n, int list, int ref, int * const mx, int * const my){
916 if(n==0){
917 const int top_ref= h->ref_cache[list][ scan8[0] - 8 ];
918 const int16_t * const B= h->mv_cache[list][ scan8[0] - 8 ];
919
920 tprintf("pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d", top_ref, B[0], B[1], h->s.mb_x, h->s.mb_y, n, list);
921
922 if(top_ref == ref){
923 *mx= B[0];
924 *my= B[1];
925 return;
926 }
927 }else{
928 const int left_ref= h->ref_cache[list][ scan8[8] - 1 ];
929 const int16_t * const A= h->mv_cache[list][ scan8[8] - 1 ];
930
931 tprintf("pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d", left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list);
932
933 if(left_ref == ref){
934 *mx= A[0];
935 *my= A[1];
936 return;
937 }
938 }
939
940 //RARE
941 pred_motion(h, n, 4, list, ref, mx, my);
942 }
943
944 /**
945 * gets the directionally predicted 8x16 MV.
946 * @param n the block index
947 * @param mx the x component of the predicted motion vector
948 * @param my the y component of the predicted motion vector
949 */
950 static inline void pred_8x16_motion(H264Context * const h, int n, int list, int ref, int * const mx, int * const my){
951 if(n==0){
952 const int left_ref= h->ref_cache[list][ scan8[0] - 1 ];
953 const int16_t * const A= h->mv_cache[list][ scan8[0] - 1 ];
954
955 tprintf("pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d", left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list);
956
957 if(left_ref == ref){
958 *mx= A[0];
959 *my= A[1];
960 return;
961 }
962 }else{
963 const int16_t * C;
964 int diagonal_ref;
965
966 diagonal_ref= fetch_diagonal_mv(h, &C, scan8[4], list, 2);
967
968 tprintf("pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d", diagonal_ref, C[0], C[1], h->s.mb_x, h->s.mb_y, n, list);
969
970 if(diagonal_ref == ref){
971 *mx= C[0];
972 *my= C[1];
973 return;
974 }
975 }
976
977 //RARE
978 pred_motion(h, n, 2, list, ref, mx, my);
979 }
980
/**
 * gets the predicted MV of a P-skip macroblock (list 0, reference 0).
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
static inline void pred_pskip_motion(H264Context * const h, int * const mx, int * const my){
    const int top_ref = h->ref_cache[0][ scan8[0] - 8 ];
    const int left_ref= h->ref_cache[0][ scan8[0] - 1 ];

    tprintf("pred_pskip: (%d) (%d) at %2d %2d\n", top_ref, left_ref, h->s.mb_x, h->s.mb_y);

    /* force a zero MV when a direct neighbour is missing, or when either
       neighbour uses reference 0 with a zero MV (both 16bit components
       tested at once through one 32bit read) */
    if(top_ref == PART_NOT_AVAILABLE || left_ref == PART_NOT_AVAILABLE
       || (top_ref == 0 && *(uint32_t*)h->mv_cache[0][ scan8[0] - 8 ] == 0)
       || (left_ref == 0 && *(uint32_t*)h->mv_cache[0][ scan8[0] - 1 ] == 0)){

        *mx = *my = 0;
        return;
    }

    /* otherwise fall back to the regular 16x16 prediction with ref 0 */
    pred_motion(h, 0, 4, 0, 0, mx, my);

    return;
}
999
/**
 * writes the per-macroblock MV/reference caches back into the frame-wide
 * tables of the current picture (used by later prediction and by the
 * motion-vector visualization).
 * @param mb_type type of the current macroblock
 */
static inline void write_back_motion(H264Context *h, int mb_type){
    MpegEncContext * const s = &h->s;
    const int b_xy = 4*s->mb_x + 4*s->mb_y*h->b_stride;   // position in 4x4 block units
    const int b8_xy= 2*s->mb_x + 2*s->mb_y*h->b8_stride;  // position in 8x8 block units
    int list;

    for(list=0; list<2; list++){
        int y;
        if((!IS_8X8(mb_type)) && !USES_LIST(mb_type, list)){
            /* list unused by this MB: clear the stored MVs/refs so no stale
               data from a previous MB at this position survives */
            if(1){ //FIXME skip or never read if mb_type doesnt use it
                for(y=0; y<4; y++){
                    *(uint64_t*)s->current_picture.motion_val[list][b_xy + 0 + y*h->b_stride]=
                    *(uint64_t*)s->current_picture.motion_val[list][b_xy + 2 + y*h->b_stride]= 0;
                }
                if( h->pps.cabac ) {
                    /* FIXME needed ? */
                    for(y=0; y<4; y++){
                        *(uint64_t*)h->mvd_table[list][b_xy + 0 + y*h->b_stride]=
                        *(uint64_t*)h->mvd_table[list][b_xy + 2 + y*h->b_stride]= 0;
                    }
                }
                for(y=0; y<2; y++){
                    *(uint16_t*)&s->current_picture.ref_index[list][b8_xy + y*h->b8_stride]= (LIST_NOT_USED&0xFF)*0x0101;
                }
            }
            continue; //FIXME direct mode ...
        }

        /* copy the 16 cached 4x4 MVs, two per 64bit store */
        for(y=0; y<4; y++){
            *(uint64_t*)s->current_picture.motion_val[list][b_xy + 0 + y*h->b_stride]= *(uint64_t*)h->mv_cache[list][scan8[0]+0 + 8*y];
            *(uint64_t*)s->current_picture.motion_val[list][b_xy + 2 + y*h->b_stride]= *(uint64_t*)h->mv_cache[list][scan8[0]+2 + 8*y];
        }
        if( h->pps.cabac ) {
            /* MV deltas are kept for CABAC context derivation of later MBs */
            for(y=0; y<4; y++){
                *(uint64_t*)h->mvd_table[list][b_xy + 0 + y*h->b_stride]= *(uint64_t*)h->mvd_cache[list][scan8[0]+0 + 8*y];
                *(uint64_t*)h->mvd_table[list][b_xy + 2 + y*h->b_stride]= *(uint64_t*)h->mvd_cache[list][scan8[0]+2 + 8*y];
            }
        }
        /* per-8x8 reference indices */
        for(y=0; y<2; y++){
            s->current_picture.ref_index[list][b8_xy + 0 + y*h->b8_stride]= h->ref_cache[list][scan8[0]+0 + 16*y];
            s->current_picture.ref_index[list][b8_xy + 1 + y*h->b8_stride]= h->ref_cache[list][scan8[0]+2 + 16*y];
        }
    }
}
1044
1045 /**
1046 * Decodes a network abstraction layer unit.
1047 * @param consumed is the number of bytes used as input
1048 * @param length is the length of the array
1049 * @param dst_length is the number of decoded bytes FIXME here or a decode rbsp ttailing?
1050 * @returns decoded bytes, might be src+1 if no escapes
1051 */
1052 static uint8_t *decode_nal(H264Context *h, uint8_t *src, int *dst_length, int *consumed, int length){
1053 int i, si, di;
1054 uint8_t *dst;
1055
1056 // src[0]&0x80; //forbidden bit
1057 h->nal_ref_idc= src[0]>>5;
1058 h->nal_unit_type= src[0]&0x1F;
1059
1060 src++; length--;
1061 #if 0
1062 for(i=0; i<length; i++)
1063 printf("%2X ", src[i]);
1064 #endif
1065 for(i=0; i+1<length; i+=2){
1066 if(src[i]) continue;
1067 if(i>0 && src[i-1]==0) i--;
1068 if(i+2<length && src[i+1]==0 && src[i+2]<=3){
1069 if(src[i+2]!=3){
1070 /* startcode, so we must be past the end */
1071 length=i;
1072 }
1073 break;
1074 }
1075 }
1076
1077 if(i>=length-1){ //no escaped 0
1078 *dst_length= length;
1079 *consumed= length+1; //+1 for the header
1080 return src;
1081 }
1082
1083 h->rbsp_buffer= av_fast_realloc(h->rbsp_buffer, &h->rbsp_buffer_size, length);
1084 dst= h->rbsp_buffer;
1085
1086 //printf("deoding esc\n");
1087 si=di=0;
1088 while(si<length){
1089 //remove escapes (very rare 1:2^22)
1090 if(si+2<length && src[si]==0 && src[si+1]==0 && src[si+2]<=3){
1091 if(src[si+2]==3){ //escape
1092 dst[di++]= 0;
1093 dst[di++]= 0;
1094 si+=3;
1095 continue;
1096 }else //next start code
1097 break;
1098 }
1099
1100 dst[di++]= src[si++];
1101 }
1102
1103 *dst_length= di;
1104 *consumed= si + 1;//+1 for the header
1105 //FIXME store exact number of bits in the getbitcontext (its needed for decoding)
1106 return dst;
1107 }
1108
#if 0 /* unused encoder-side NAL escaping / trailing-bit code, kept for reference */
/**
 * @param src the data which should be escaped
 * @param dst the target buffer, dst+1 == src is allowed as a special case
 * @param length the length of the src data
 * @param dst_length the length of the dst array
 * @returns length of escaped data in bytes or -1 if an error occured
 */
static int encode_nal(H264Context *h, uint8_t *dst, uint8_t *src, int length, int dst_length){
    int i, escape_count, si, di;
    uint8_t *temp;

    assert(length>=0);
    assert(dst_length>0);

    dst[0]= (h->nal_ref_idc<<5) + h->nal_unit_type;

    if(length==0) return 1;

    /* first pass: count the 00 00 0x patterns that need an escape byte */
    escape_count= 0;
    for(i=0; i<length; i+=2){
        if(src[i]) continue;
        if(i>0 && src[i-1]==0)
            i--;
        if(i+2<length && src[i+1]==0 && src[i+2]<=3){
            escape_count++;
            i+=2;
        }
    }

    if(escape_count==0){
        if(dst+1 != src)
            memcpy(dst+1, src, length);
        return length + 1;
    }

    if(length + escape_count + 1> dst_length)
        return -1;

    //this should be damn rare (hopefully)

    h->rbsp_buffer= av_fast_realloc(h->rbsp_buffer, &h->rbsp_buffer_size, length + escape_count);
    temp= h->rbsp_buffer;
//printf("encoding esc\n");

    /* second pass: insert a 03 after every 00 00 followed by a byte <= 3 */
    si= 0;
    di= 0;
    while(si < length){
        if(si+2<length && src[si]==0 && src[si+1]==0 && src[si+2]<=3){
            temp[di++]= 0; si++;
            temp[di++]= 0; si++;
            temp[di++]= 3;
            temp[di++]= src[si++];
        }
        else
            temp[di++]= src[si++];
    }
    memcpy(dst+1, temp, length+escape_count);

    assert(di == length+escape_count);

    return di + 1;
}

/**
 * write 1,10,100,1000,... for alignment, yes its exactly inverse to mpeg4
 */
static void encode_rbsp_trailing(PutBitContext *pb){
    int length;
    put_bits(pb, 1, 1);
    length= (-put_bits_count(pb))&7;
    if(length) put_bits(pb, length, 0);
}
#endif
1183
/**
 * identifies the exact end of the bitstream
 * @return the length of the trailing, or 0 if damaged
 */
static int decode_rbsp_trailing(uint8_t *src){
    int bits= *src;
    int pos;

    tprintf("rbsp trailing %X\n", bits);

    /* the stop bit is the lowest set bit of the last byte; its position
       (counted from 1) is the length of the trailing pattern */
    for(pos=1; pos<9; pos++){
        if(bits & 1)
            return pos;
        bits >>= 1;
    }
    return 0;
}
1200
/**
 * idct tranforms the 16 dc values and dequantize them.
 * @param qp quantization parameter
 */
static void h264_luma_dc_dequant_idct_c(DCTELEM *block, int qp){
    const int qmul= dequant_coeff[qp][0];
#define stride 16
    int i;
    int temp[16]; //FIXME check if this is a good idea
    /* offsets selecting the DC coefficient of each 4x4 sub-block inside the
       macroblock's coefficient layout (combined as y_offset[i]+x pattern
       below and stride*{0,2,8,10}+x_offset[i] on write-back) */
    static const int x_offset[4]={0, 1*stride, 4* stride, 5*stride};
    static const int y_offset[4]={0, 2*stride, 8* stride, 10*stride};

//memset(block, 64, 2*256);
//return;
    /* first butterfly pass over the 4x4 DC array */
    for(i=0; i<4; i++){
        const int offset= y_offset[i];
        const int z0= block[offset+stride*0] + block[offset+stride*4];
        const int z1= block[offset+stride*0] - block[offset+stride*4];
        const int z2= block[offset+stride*1] - block[offset+stride*5];
        const int z3= block[offset+stride*1] + block[offset+stride*5];

        temp[4*i+0]= z0+z3;
        temp[4*i+1]= z1+z2;
        temp[4*i+2]= z1-z2;
        temp[4*i+3]= z0-z3;
    }

    /* second pass, fused with dequantization: (x*qmul + 2) >> 2 */
    for(i=0; i<4; i++){
        const int offset= x_offset[i];
        const int z0= temp[4*0+i] + temp[4*2+i];
        const int z1= temp[4*0+i] - temp[4*2+i];
        const int z2= temp[4*1+i] - temp[4*3+i];
        const int z3= temp[4*1+i] + temp[4*3+i];

        block[stride*0 +offset]= ((z0 + z3)*qmul + 2)>>2; //FIXME think about merging this into decode_resdual
        block[stride*2 +offset]= ((z1 + z2)*qmul + 2)>>2;
        block[stride*8 +offset]= ((z1 - z2)*qmul + 2)>>2;
        block[stride*10+offset]= ((z0 - z3)*qmul + 2)>>2;
    }
}
1241
#if 0 /* unused encoder-side luma DC forward transform, kept for reference */
/**
 * dct tranforms the 16 dc values.
 * @param qp quantization parameter ??? FIXME
 */
static void h264_luma_dc_dct_c(DCTELEM *block/*, int qp*/){
//    const int qmul= dequant_coeff[qp][0];
    int i;
    int temp[16]; //FIXME check if this is a good idea
    static const int x_offset[4]={0, 1*stride, 4* stride, 5*stride};
    static const int y_offset[4]={0, 2*stride, 8* stride, 10*stride};

    /* first butterfly pass over the 4x4 DC array */
    for(i=0; i<4; i++){
        const int offset= y_offset[i];
        const int z0= block[offset+stride*0] + block[offset+stride*4];
        const int z1= block[offset+stride*0] - block[offset+stride*4];
        const int z2= block[offset+stride*1] - block[offset+stride*5];
        const int z3= block[offset+stride*1] + block[offset+stride*5];

        temp[4*i+0]= z0+z3;
        temp[4*i+1]= z1+z2;
        temp[4*i+2]= z1-z2;
        temp[4*i+3]= z0-z3;
    }

    /* second pass with a >>1 normalization */
    for(i=0; i<4; i++){
        const int offset= x_offset[i];
        const int z0= temp[4*0+i] + temp[4*2+i];
        const int z1= temp[4*0+i] - temp[4*2+i];
        const int z2= temp[4*1+i] - temp[4*3+i];
        const int z3= temp[4*1+i] + temp[4*3+i];

        block[stride*0 +offset]= (z0 + z3)>>1;
        block[stride*2 +offset]= (z1 + z2)>>1;
        block[stride*8 +offset]= (z1 - z2)>>1;
        block[stride*10+offset]= (z0 - z3)>>1;
    }
}
#endif
1281
1282 #undef xStride
1283 #undef stride
1284
1285 static void chroma_dc_dequant_idct_c(DCTELEM *block, int qp){
1286 const int qmul= dequant_coeff[qp][0];
1287 const int stride= 16*2;
1288 const int xStride= 16;
1289 int a,b,c,d,e;
1290
1291 a= block[stride*0 + xStride*0];
1292 b= block[stride*0 + xStride*1];
1293 c= block[stride*1 + xStride*0];
1294 d= block[stride*1 + xStride*1];
1295
1296 e= a-b;
1297 a= a+b;
1298 b= c-d;
1299 c= c+d;
1300
1301 block[stride*0 + xStride*0]= ((a+c)*qmul + 0)>>1;
1302 block[stride*0 + xStride*1]= ((e+b)*qmul + 0)>>1;
1303 block[stride*1 + xStride*0]= ((a-c)*qmul + 0)>>1;
1304 block[stride*1 + xStride*1]= ((e-b)*qmul + 0)>>1;
1305 }
1306
#if 0 /* unused encoder-side 2x2 chroma DC forward transform, kept for reference */
static void chroma_dc_dct_c(DCTELEM *block){
    const int stride= 16*2;
    const int xStride= 16;
    int a,b,c,d,e;

    a= block[stride*0 + xStride*0];
    b= block[stride*0 + xStride*1];
    c= block[stride*1 + xStride*0];
    d= block[stride*1 + xStride*1];

    /* 2x2 sum/difference butterfly (e reuses a-b, then a,b,c are recycled) */
    e= a-b;
    a= a+b;
    b= c-d;
    c= c+d;

    block[stride*0 + xStride*0]= (a+c);
    block[stride*0 + xStride*1]= (e+b);
    block[stride*1 + xStride*0]= (a-c);
    block[stride*1 + xStride*1]= (e-b);
}
#endif
1329
1330 /**
1331 * gets the chroma qp.
1332 */
1333 static inline int get_chroma_qp(H264Context *h, int qscale){
1334
1335 return chroma_qp[clip(qscale + h->pps.chroma_qp_index_offset, 0, 51)];
1336 }
1337
1338
#if 0 /* unused encoder-side forward 4x4 transform of the difference of two blocks */
static void h264_diff_dct_c(DCTELEM *block, uint8_t *src1, uint8_t *src2, int stride){
    int i;
    //FIXME try int temp instead of block

    /* horizontal pass on the pixel differences */
    for(i=0; i<4; i++){
        const int d0= src1[0 + i*stride] - src2[0 + i*stride];
        const int d1= src1[1 + i*stride] - src2[1 + i*stride];
        const int d2= src1[2 + i*stride] - src2[2 + i*stride];
        const int d3= src1[3 + i*stride] - src2[3 + i*stride];
        const int z0= d0 + d3;
        const int z3= d0 - d3;
        const int z1= d1 + d2;
        const int z2= d1 - d2;

        block[0 + 4*i]= z0 + z1;
        block[1 + 4*i]= 2*z3 + z2;
        block[2 + 4*i]= z0 - z1;
        block[3 + 4*i]= z3 - 2*z2;
    }

    /* vertical pass, in place */
    for(i=0; i<4; i++){
        const int z0= block[0*4 + i] + block[3*4 + i];
        const int z3= block[0*4 + i] - block[3*4 + i];
        const int z1= block[1*4 + i] + block[2*4 + i];
        const int z2= block[1*4 + i] - block[2*4 + i];

        block[0*4 + i]= z0 + z1;
        block[1*4 + i]= 2*z3 + z2;
        block[2*4 + i]= z0 - z1;
        block[3*4 + i]= z3 - 2*z2;
    }
}
#endif
1373
//FIXME need to check that this doesnt overflow signed 32 bit for low qp, iam not sure, its very close
//FIXME check that gcc inlines this (and optimizes intra & seperate_dc stuff away)
/**
 * quantizes a 4x4 block of coefficients in place.
 * @param scantable scan order; used only to find the last nonzero position
 * @param qscale quantizer, selects the row of quant_coeff
 * @param intra nonzero selects the larger rounding bias (1/3 instead of 1/6)
 * @param seperate_dc nonzero quantizes block[0] with its own bias/shift
 * @return scan index of the last coefficient that quantized to nonzero
 *         (initialized to 0 with seperate_dc, -1 without)
 */
static inline int quantize_c(DCTELEM *block, uint8_t *scantable, int qscale, int intra, int seperate_dc){
    int i;
    const int * const quant_table= quant_coeff[qscale];
    const int bias= intra ? (1<<QUANT_SHIFT)/3 : (1<<QUANT_SHIFT)/6;
    /* with threshold2 == 2*threshold1, the unsigned compare
       (unsigned)(level+threshold1) > threshold2 is |level| > threshold1 */
    const unsigned int threshold1= (1<<QUANT_SHIFT) - bias - 1;
    const unsigned int threshold2= (threshold1<<1);
    int last_non_zero;

    if(seperate_dc){
        if(qscale<=18){
            //avoid overflows
            const int dc_bias= intra ? (1<<(QUANT_SHIFT-2))/3 : (1<<(QUANT_SHIFT-2))/6;
            const unsigned int dc_threshold1= (1<<(QUANT_SHIFT-2)) - dc_bias - 1;
            const unsigned int dc_threshold2= (dc_threshold1<<1);

            /* low qp: take the qscale+18 table entry with a 2 bit smaller
               shift so the intermediate stays within 32 bits */
            int level= block[0]*quant_coeff[qscale+18][0];
            if(((unsigned)(level+dc_threshold1))>dc_threshold2){
                if(level>0){
                    level= (dc_bias + level)>>(QUANT_SHIFT-2);
                    block[0]= level;
                }else{
                    level= (dc_bias - level)>>(QUANT_SHIFT-2);
                    block[0]= -level;
                }
//                last_non_zero = i;
            }else{
                block[0]=0;
            }
        }else{
            const int dc_bias= intra ? (1<<(QUANT_SHIFT+1))/3 : (1<<(QUANT_SHIFT+1))/6;
            const unsigned int dc_threshold1= (1<<(QUANT_SHIFT+1)) - dc_bias - 1;
            const unsigned int dc_threshold2= (dc_threshold1<<1);

            int level= block[0]*quant_table[0];
            if(((unsigned)(level+dc_threshold1))>dc_threshold2){
                if(level>0){
                    level= (dc_bias + level)>>(QUANT_SHIFT+1);
                    block[0]= level;
                }else{
                    level= (dc_bias - level)>>(QUANT_SHIFT+1);
                    block[0]= -level;
                }
//                last_non_zero = i;
            }else{
                block[0]=0;
            }
        }
        last_non_zero= 0;
        i=1;  // DC handled above; only the 15 AC coefficients remain
    }else{
        last_non_zero= -1;
        i=0;
    }

    for(; i<16; i++){
        const int j= scantable[i];
        int level= block[j]*quant_table[j];

//        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
//           || bias-level >= (1<<(QMAT_SHIFT - 3))){
        /* dead-zone quantization: values within +-threshold1 become 0 */
        if(((unsigned)(level+threshold1))>threshold2){
            if(level>0){
                level= (bias + level)>>QUANT_SHIFT;
                block[j]= level;
            }else{
                level= (bias - level)>>QUANT_SHIFT;
                block[j]= -level;
            }
            last_non_zero = i;
        }else{
            block[j]=0;
        }
    }

    return last_non_zero;
}
1452
/* 4x4 vertical prediction: replicate the row above into all four rows */
static void pred4x4_vertical_c(uint8_t *src, uint8_t *topright, int stride){
    const uint32_t top= ((uint32_t*)(src - stride))[0];
    int i;

    for(i=0; i<4; i++)
        ((uint32_t*)(src + i*stride))[0]= top;
}
1460
/* 4x4 horizontal prediction: replicate each left-neighbour sample across
   its row (0x01010101 spreads one byte into four) */
static void pred4x4_horizontal_c(uint8_t *src, uint8_t *topright, int stride){
    int i;

    for(i=0; i<4; i++)
        ((uint32_t*)(src + i*stride))[0]= src[-1 + i*stride] * 0x01010101U;
}
1467
/* 4x4 DC prediction: mean of the 4 top and 4 left neighbours, rounded */
static void pred4x4_dc_c(uint8_t *src, uint8_t *topright, int stride){
    int i, sum= 4;  /* rounding term for the >>3 below */
    uint32_t fill;

    for(i=0; i<4; i++)
        sum+= src[i - stride] + src[-1 + i*stride];

    fill= (uint32_t)(sum>>3) * 0x01010101U;
    for(i=0; i<4; i++)
        ((uint32_t*)(src + i*stride))[0]= fill;
}
1477
/* 4x4 DC prediction from the left neighbours only (top row unavailable) */
static void pred4x4_left_dc_c(uint8_t *src, uint8_t *topright, int stride){
    int i, sum= 2;  /* rounding term for the >>2 below */
    uint32_t fill;

    for(i=0; i<4; i++)
        sum+= src[-1 + i*stride];

    fill= (uint32_t)(sum>>2) * 0x01010101U;
    for(i=0; i<4; i++)
        ((uint32_t*)(src + i*stride))[0]= fill;
}
1486
/* 4x4 DC prediction from the top neighbours only (left column unavailable) */
static void pred4x4_top_dc_c(uint8_t *src, uint8_t *topright, int stride){
    int i, sum= 2;  /* rounding term for the >>2 below */
    uint32_t fill;

    for(i=0; i<4; i++)
        sum+= src[i - stride];

    fill= (uint32_t)(sum>>2) * 0x01010101U;
    for(i=0; i<4; i++)
        ((uint32_t*)(src + i*stride))[0]= fill;
}
1495
/* 4x4 DC prediction with no neighbours available: flat mid-grey (128) */
static void pred4x4_128_dc_c(uint8_t *src, uint8_t *topright, int stride){
    int i;

    for(i=0; i<4; i++)
        ((uint32_t*)(src + i*stride))[0]= 128U*0x01010101U;
}
1502
1503
/* load the four samples above-right of the 4x4 block into t4..t7 */
#define LOAD_TOP_RIGHT_EDGE\
    const int t4= topright[0];\
    const int t5= topright[1];\
    const int t6= topright[2];\
    const int t7= topright[3];\

/* load the four samples left of the 4x4 block into l0..l3 */
#define LOAD_LEFT_EDGE\
    const int l0= src[-1+0*stride];\
    const int l1= src[-1+1*stride];\
    const int l2= src[-1+2*stride];\
    const int l3= src[-1+3*stride];\

/* load the four samples above the 4x4 block into t0..t3 */
#define LOAD_TOP_EDGE\
    const int t0= src[ 0-1*stride];\
    const int t1= src[ 1-1*stride];\
    const int t2= src[ 2-1*stride];\
    const int t3= src[ 3-1*stride];\
1521
/* diagonal down-right prediction: every down-right diagonal gets one 3-tap
   smoothed value of the left / top-left / top neighbour samples */
static void pred4x4_down_right_c(uint8_t *src, uint8_t *topright, int stride){
    const int lt= src[-1-1*stride];
    LOAD_TOP_EDGE
    LOAD_LEFT_EDGE

    src[0+3*stride]=(l3 + 2*l2 + l1 + 2)>>2;
    src[0+2*stride]=
    src[1+3*stride]=(l2 + 2*l1 + l0 + 2)>>2;
    src[0+1*stride]=
    src[1+2*stride]=
    src[2+3*stride]=(l1 + 2*l0 + lt + 2)>>2;
    src[0+0*stride]=
    src[1+1*stride]=
    src[2+2*stride]=
    src[3+3*stride]=(l0 + 2*lt + t0 + 2)>>2;
    src[1+0*stride]=
    src[2+1*stride]=
    src[3+2*stride]=(lt + 2*t0 + t1 + 2)>>2;
    src[2+0*stride]=
    src[3+1*stride]=(t0 + 2*t1 + t2 + 2)>>2;
    src[3+0*stride]=(t1 + 2*t2 + t3 + 2)>>2;
}
1544
/* diagonal down-left prediction: pixel (x,y) takes the smoothed sample of
   anti-diagonal x+y, built from the top and top-right neighbours */
static void pred4x4_down_left_c(uint8_t *src, uint8_t *topright, int stride){
    const int t0= src[ 0-1*stride];
    const int t1= src[ 1-1*stride];
    const int t2= src[ 2-1*stride];
    const int t3= src[ 3-1*stride];
    const int t4= topright[0];
    const int t5= topright[1];
    const int t6= topright[2];
    const int t7= topright[3];
    const int diag[7]={
        (t0 + 2*t1 + t2 + 2)>>2,
        (t1 + 2*t2 + t3 + 2)>>2,
        (t2 + 2*t3 + t4 + 2)>>2,
        (t3 + 2*t4 + t5 + 2)>>2,
        (t4 + 2*t5 + t6 + 2)>>2,
        (t5 + 2*t6 + t7 + 2)>>2,
        (t6 + 3*t7      + 2)>>2,  /* last diagonal only sees t6/t7 */
    };
    int x, y;

    for(y=0; y<4; y++)
        for(x=0; x<4; x++)
            src[x + y*stride]= diag[x + y];
}
1567
/* vertical-right prediction; each smoothed value is shared by a pair of
   pixels two rows apart (one column to the right) */
static void pred4x4_vertical_right_c(uint8_t *src, uint8_t *topright, int stride){
    const int lt= src[-1-1*stride];
    LOAD_TOP_EDGE
    LOAD_LEFT_EDGE
    const __attribute__((unused)) int unu= l3;  // l3 comes with the macro but is not needed here

    src[0+0*stride]=
    src[1+2*stride]=(lt + t0 + 1)>>1;
    src[1+0*stride]=
    src[2+2*stride]=(t0 + t1 + 1)>>1;
    src[2+0*stride]=
    src[3+2*stride]=(t1 + t2 + 1)>>1;
    src[3+0*stride]=(t2 + t3 + 1)>>1;
    src[0+1*stride]=
    src[1+3*stride]=(l0 + 2*lt + t0 + 2)>>2;
    src[1+1*stride]=
    src[2+3*stride]=(lt + 2*t0 + t1 + 2)>>2;
    src[2+1*stride]=
    src[3+3*stride]=(t0 + 2*t1 + t2 + 2)>>2;
    src[3+1*stride]=(t1 + 2*t2 + t3 + 2)>>2;
    src[0+2*stride]=(lt + 2*l0 + l1 + 2)>>2;
    src[0+3*stride]=(l0 + 2*l1 + l2 + 2)>>2;
}
1591
/* vertical-left prediction; each smoothed value is shared by a pair of
   pixels two rows apart */
static void pred4x4_vertical_left_c(uint8_t *src, uint8_t *topright, int stride){
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE
    const __attribute__((unused)) int unu= t7;  // t7 comes with the macro but is not needed here

    src[0+0*stride]=(t0 + t1 + 1)>>1;
    src[1+0*stride]=
    src[0+2*stride]=(t1 + t2 + 1)>>1;
    src[2+0*stride]=
    src[1+2*stride]=(t2 + t3 + 1)>>1;
    src[3+0*stride]=
    src[2+2*stride]=(t3 + t4+ 1)>>1;
    src[3+2*stride]=(t4 + t5+ 1)>>1;
    src[0+1*stride]=(t0 + 2*t1 + t2 + 2)>>2;
    src[1+1*stride]=
    src[0+3*stride]=(t1 + 2*t2 + t3 + 2)>>2;
    src[2+1*stride]=
    src[1+3*stride]=(t2 + 2*t3 + t4 + 2)>>2;
    src[3+1*stride]=
    src[2+3*stride]=(t3 + 2*t4 + t5 + 2)>>2;
    src[3+3*stride]=(t4 + 2*t5 + t6 + 2)>>2;
}
1614
/* horizontal-up prediction: pixel (x,y) takes the sample with index
   zHU = x + 2*y from the interpolated left-neighbour column; everything
   past zHU==5 is a plain copy of the bottom-left sample l3 */
static void pred4x4_horizontal_up_c(uint8_t *src, uint8_t *topright, int stride){
    const int l0= src[-1+0*stride];
    const int l1= src[-1+1*stride];
    const int l2= src[-1+2*stride];
    const int l3= src[-1+3*stride];
    const int zhu[10]={
        (l0 + l1 + 1)>>1,
        (l0 + 2*l1 + l2 + 2)>>2,
        (l1 + l2 + 1)>>1,
        (l1 + 2*l2 + l3 + 2)>>2,
        (l2 + l3 + 1)>>1,
        (l2 + 2*l3 + l3 + 2)>>2,
        l3, l3, l3, l3,
    };
    int x, y;

    for(y=0; y<4; y++)
        for(x=0; x<4; x++)
            src[x + y*stride]= zhu[x + 2*y];
}
1635
/* horizontal-down prediction; each smoothed value is shared by a pair of
   pixels two columns apart */
static void pred4x4_horizontal_down_c(uint8_t *src, uint8_t *topright, int stride){
    const int lt= src[-1-1*stride];
    LOAD_TOP_EDGE
    LOAD_LEFT_EDGE
    const __attribute__((unused)) int unu= t3;  // t3 comes with the macro but is not needed here

    src[0+0*stride]=
    src[2+1*stride]=(lt + l0 + 1)>>1;
    src[1+0*stride]=
    src[3+1*stride]=(l0 + 2*lt + t0 + 2)>>2;
    src[2+0*stride]=(lt + 2*t0 + t1 + 2)>>2;
    src[3+0*stride]=(t0 + 2*t1 + t2 + 2)>>2;
    src[0+1*stride]=
    src[2+2*stride]=(l0 + l1 + 1)>>1;
    src[1+1*stride]=
    src[3+2*stride]=(lt + 2*l0 + l1 + 2)>>2;
    src[0+2*stride]=
    src[2+3*stride]=(l1 + l2+ 1)>>1;
    src[1+2*stride]=
    src[3+3*stride]=(l0 + 2*l1 + l2 + 2)>>2;
    src[0+3*stride]=(l2 + l3 + 1)>>1;
    src[1+3*stride]=(l1 + 2*l2 + l3 + 2)>>2;
}
1659
/* 16x16 vertical prediction: copy the row above into all 16 rows */
static void pred16x16_vertical_c(uint8_t *src, int stride){
    int i, j;
    uint32_t row[4];

    for(j=0; j<4; j++)
        row[j]= ((uint32_t*)(src - stride))[j];

    for(i=0; i<16; i++)
        for(j=0; j<4; j++)
            ((uint32_t*)(src + i*stride))[j]= row[j];
}
1674
/* 16x16 horizontal prediction: spread each left-neighbour sample across
   its row, four bytes per store */
static void pred16x16_horizontal_c(uint8_t *src, int stride){
    int i, j;

    for(i=0; i<16; i++){
        const uint32_t row= src[-1 + i*stride] * 0x01010101U;
        for(j=0; j<4; j++)
            ((uint32_t*)(src + i*stride))[j]= row;
    }
}
1685
/* 16x16 DC prediction: rounded mean of the 16 left and 16 top neighbours */
static void pred16x16_dc_c(uint8_t *src, int stride){
    int i, j, sum=0;
    uint32_t fill;

    for(i=0; i<16; i++)
        sum+= src[-1 + i*stride] + src[i - stride];

    fill= (uint32_t)((sum + 16)>>5) * 0x01010101U;

    for(i=0; i<16; i++)
        for(j=0; j<4; j++)
            ((uint32_t*)(src + i*stride))[j]= fill;
}
1706
/* 16x16 DC prediction from the left neighbours only */
static void pred16x16_left_dc_c(uint8_t *src, int stride){
    int i, j, sum=0;
    uint32_t fill;

    for(i=0; i<16; i++)
        sum+= src[-1 + i*stride];

    fill= (uint32_t)((sum + 8)>>4) * 0x01010101U;

    for(i=0; i<16; i++)
        for(j=0; j<4; j++)
            ((uint32_t*)(src + i*stride))[j]= fill;
}
1723
/* 16x16 DC prediction from the top neighbours only */
static void pred16x16_top_dc_c(uint8_t *src, int stride){
    int i, j, sum=0;
    uint32_t fill;

    for(i=0; i<16; i++)
        sum+= src[i - stride];

    fill= (uint32_t)((sum + 8)>>4) * 0x01010101U;

    for(i=0; i<16; i++)
        for(j=0; j<4; j++)
            ((uint32_t*)(src + i*stride))[j]= fill;
}
1739
/* 16x16 DC prediction with no neighbours available: flat mid-grey (128) */
static void pred16x16_128_dc_c(uint8_t *src, int stride){
    int i, j;

    for(i=0; i<16; i++)
        for(j=0; j<4; j++)
            ((uint32_t*)(src + i*stride))[j]= 0x01010101U*128U;
}
1750
/**
 * 16x16 plane prediction, shared between H.264 and SVQ3: fits a linear ramp
 * to the top and left border samples.  The svq3 flag selects SVQ3's
 * different gradient scaling (and swaps H and V).
 */
static inline void pred16x16_plane_compat_c(uint8_t *src, int stride, const int svq3){
    int i, j, k;
    int a;
    uint8_t *cm = cropTbl + MAX_NEG_CROP;  // NOTE(review): crop table, presumably clips to 0..255 — confirm in common.h
    const uint8_t * const src0 = src+7-stride;
    const uint8_t *src1 = src+8*stride-1;
    const uint8_t *src2 = src1-2*stride;      // == src+6*stride-1;
    int H = src0[1] - src0[-1];
    int V = src1[0] - src2[ 0];
    /* distance-weighted gradients along the top row (H) and left column (V) */
    for(k=2; k<=8; ++k) {
        src1 += stride; src2 -= stride;
        H += k*(src0[k] - src0[-k]);
        V += k*(src1[0] - src2[ 0]);
    }
    if(svq3){
        H = ( 5*(H/4) ) / 16;
        V = ( 5*(V/4) ) / 16;

        /* required for 100% accuracy */
        i = H; H = V; V = i;
    }else{
        H = ( 5*H+32 ) >> 6;
        V = ( 5*V+32 ) >> 6;
    }

    /* base value of the ramp; per-pixel value is (a + x*H + y*V) >> 5 */
    a = 16*(src1[0] + src2[16] + 1) - 7*(V+H);
    for(j=16; j>0; --j) {
        int b = a;
        a += V;
        for(i=-16; i<0; i+=4) {
            src[16+i] = cm[ (b    ) >> 5 ];
            src[17+i] = cm[ (b+  H) >> 5 ];
            src[18+i] = cm[ (b+2*H) >> 5 ];
            src[19+i] = cm[ (b+3*H) >> 5 ];
            b += 4*H;
        }
        src += stride;
    }
}
1790
/* plain H.264 16x16 plane prediction: the compat helper with svq3 disabled */
static void pred16x16_plane_c(uint8_t *src, int stride){
    pred16x16_plane_compat_c(src, stride, 0);
}
1794
/* 8x8 vertical prediction: copy the row above into all 8 rows */
static void pred8x8_vertical_c(uint8_t *src, int stride){
    int i, j;
    uint32_t row[2];

    for(j=0; j<2; j++)
        row[j]= ((uint32_t*)(src - stride))[j];

    for(i=0; i<8; i++)
        for(j=0; j<2; j++)
            ((uint32_t*)(src + i*stride))[j]= row[j];
}
1805
/* 8x8 horizontal prediction: spread each left-neighbour sample across
   its row */
static void pred8x8_horizontal_c(uint8_t *src, int stride){
    int i;

    for(i=0; i<8; i++){
        const uint32_t row= src[-1 + i*stride] * 0x01010101U;
        ((uint32_t*)(src + i*stride))[0]=
        ((uint32_t*)(src + i*stride))[1]= row;
    }
}
1814
/* 8x8 DC prediction with no neighbours: flat mid-grey.  The original wrote
   the two 4-row halves in separate (identical) loops, mirroring the other
   dc variants; a single loop does the same thing. */
static void pred8x8_128_dc_c(uint8_t *src, int stride){
    int i;

    for(i=0; i<8; i++){
        ((uint32_t*)(src + i*stride))[0]=
        ((uint32_t*)(src + i*stride))[1]= 0x01010101U*128U;
    }
}
1827
/* 8x8 DC prediction from the left column only: one DC for the upper four
   rows, one for the lower four */
static void pred8x8_left_dc_c(uint8_t *src, int stride){
    int i;
    int sum_upper=0, sum_lower=0;
    uint32_t fill_upper, fill_lower;

    for(i=0; i<4; i++){
        sum_upper+= src[-1 +  i   *stride];
        sum_lower+= src[-1 + (i+4)*stride];
    }
    fill_upper= (uint32_t)((sum_upper + 2)>>2) * 0x01010101U;
    fill_lower= (uint32_t)((sum_lower + 2)>>2) * 0x01010101U;

    for(i=0; i<4; i++){
        ((uint32_t*)(src +  i   *stride))[0]=
        ((uint32_t*)(src +  i   *stride))[1]= fill_upper;
        ((uint32_t*)(src + (i+4)*stride))[0]=
        ((uint32_t*)(src + (i+4)*stride))[1]= fill_lower;
    }
}
1849
/* 8x8 DC prediction from the top row only: one DC for the left four
   columns, one for the right four */
static void pred8x8_top_dc_c(uint8_t *src, int stride){
    int i;
    int sum_left=0, sum_right=0;
    uint32_t fill_left, fill_right;

    for(i=0; i<4; i++){
        sum_left += src[i     - stride];
        sum_right+= src[i + 4 - stride];
    }
    fill_left = (uint32_t)((sum_left  + 2)>>2) * 0x01010101U;
    fill_right= (uint32_t)((sum_right + 2)>>2) * 0x01010101U;

    for(i=0; i<8; i++){
        ((uint32_t*)(src + i*stride))[0]= fill_left;
        ((uint32_t*)(src + i*stride))[1]= fill_right;
    }
}
1871
1872
/* full 8x8 DC prediction: one DC per 4x4 quadrant.  Top-left averages its
   top and left neighbours, top-right only its top samples, bottom-left only
   its left samples, bottom-right the mean of the latter two. */
static void pred8x8_dc_c(uint8_t *src, int stride){
    int i;
    int dc0, dc1, dc2, dc3;

    dc0=dc1=dc2=0;
    for(i=0;i<4; i++){
        dc0+= src[-1+i*stride] + src[i-stride];   // top-left: left column + top row
        dc1+= src[4+i-stride];                    // top-right: top row only
        dc2+= src[-1+(i+4)*stride];               // bottom-left: left column only
    }
    /* dc3 must be computed from the raw sums before dc1/dc2 are replicated */
    dc3= 0x01010101*((dc1 + dc2 + 4)>>3);
    dc0= 0x01010101*((dc0 + 4)>>3);
    dc1= 0x01010101*((dc1 + 2)>>2);
    dc2= 0x01010101*((dc2 + 2)>>2);

    for(i=0; i<4; i++){
        ((uint32_t*)(src+i*stride))[0]= dc0;
        ((uint32_t*)(src+i*stride))[1]= dc1;
    }
    for(i=4; i<8; i++){
        ((uint32_t*)(src+i*stride))[0]= dc2;
        ((uint32_t*)(src+i*stride))[1]= dc3;
    }
}
1897
/* 8x8 plane prediction: fit a linear ramp a + x*H + y*V to the top and left
   border samples (same scheme as the 16x16 variant, smaller taps) */
static void pred8x8_plane_c(uint8_t *src, int stride){
    int j, k;
    int a;
    uint8_t *cm = cropTbl + MAX_NEG_CROP;  // NOTE(review): crop table, presumably clips to 0..255 — confirm in common.h
    const uint8_t * const src0 = src+3-stride;
    const uint8_t *src1 = src+4*stride-1;
    const uint8_t *src2 = src1-2*stride;    // == src+2*stride-1;
    int H = src0[1] - src0[-1];             // distance-weighted horizontal gradient ...
    int V = src1[0] - src2[ 0];             // ... and vertical gradient
    for(k=2; k<=4; ++k) {
        src1 += stride; src2 -= stride;
        H += k*(src0[k] - src0[-k]);
        V += k*(src1[0] - src2[ 0]);
    }
    H = ( 17*H+16 ) >> 5;
    V = ( 17*V+16 ) >> 5;

    /* base value; per-pixel value is (a + x*H + y*V) >> 5 */
    a = 16*(src1[0] + src2[8]+1) - 3*(V+H);
    for(j=8; j>0; --j) {
        int b = a;
        a += V;
        src[0] = cm[ (b    ) >> 5 ];
        src[1] = cm[ (b+  H) >> 5 ];
        src[2] = cm[ (b+2*H) >> 5 ];
        src[3] = cm[ (b+3*H) >> 5 ];
        src[4] = cm[ (b+4*H) >> 5 ];
        src[5] = cm[ (b+5*H) >> 5 ];
        src[6] = cm[ (b+6*H) >> 5 ];
        src[7] = cm[ (b+7*H) >> 5 ];
        src += stride;
    }
}
1930
/**
 * performs motion compensation for one partition in one direction.
 * @param pic the reference picture the prediction is read from
 * @param n block index (scan8 order) whose cached MV is used
 * @param square nonzero if one qpix op covers the whole partition; otherwise
 *        the op is applied a second time at an offset of delta bytes
 * @param chroma_height height of the chroma part in lines
 * @param delta byte offset of the second half of a non-square partition
 * @param list reference list the MV was taken from
 */
static inline void mc_dir_part(H264Context *h, Picture *pic, int n, int square, int chroma_height, int delta, int list,
                           uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                           int src_x_offset, int src_y_offset,
                           qpel_mc_func *qpix_op, h264_chroma_mc_func chroma_op){
    MpegEncContext * const s = &h->s;
    const int mx= h->mv_cache[list][ scan8[n] ][0] + src_x_offset*8;
    const int my= h->mv_cache[list][ scan8[n] ][1] + src_y_offset*8;
    const int luma_xy= (mx&3) + ((my&3)<<2);  // quarter-pel phase selects the qpel filter
    uint8_t * src_y = pic->data[0] + (mx>>2) + (my>>2)*s->linesize;
    uint8_t * src_cb= pic->data[1] + (mx>>3) + (my>>3)*s->uvlinesize;  // chroma positions use an extra >>1
    uint8_t * src_cr= pic->data[2] + (mx>>3) + (my>>3)*s->uvlinesize;
    int extra_width= (s->flags&CODEC_FLAG_EMU_EDGE) ? 0 : 16; //FIXME increase edge?, IMHO not worth it
    int extra_height= extra_width;
    int emu=0;
    const int full_mx= mx>>2;  // integer-pel position
    const int full_my= my>>2;

    assert(pic->data[0]);

    /* sub-pel interpolation reads outside the block (2 before, 3 after,
       matching the 16+5 sized emulated edge below), so shrink the usable
       out-of-frame margin accordingly */
    if(mx&7) extra_width -= 3;
    if(my&7) extra_height -= 3;

    /* if any source sample lies outside the (edge-padded) frame, build a
       local copy with replicated edges and read from that instead */
    if(   full_mx < 0-extra_width
       || full_my < 0-extra_height
       || full_mx + 16/*FIXME*/ > s->width + extra_width
       || full_my + 16/*FIXME*/ > s->height + extra_height){
        ff_emulated_edge_mc(s->edge_emu_buffer, src_y - 2 - 2*s->linesize, s->linesize, 16+5, 16+5/*FIXME*/, full_mx-2, full_my-2, s->width, s->height);
        src_y= s->edge_emu_buffer + 2 + 2*s->linesize;
        emu=1;
    }

    qpix_op[luma_xy](dest_y, src_y, s->linesize); //FIXME try variable height perhaps?
    if(!square){
        qpix_op[luma_xy](dest_y + delta, src_y + delta, s->linesize);
    }

    if(s->flags&CODEC_FLAG_GRAY) return;  // grayscale decoding: luma only

    if(emu){
        ff_emulated_edge_mc(s->edge_emu_buffer, src_cb, s->uvlinesize, 9, 9/*FIXME*/, (mx>>3), (my>>3), s->width>>1, s->height>>1);
        src_cb= s->edge_emu_buffer;
    }
    chroma_op(dest_cb, src_cb, s->uvlinesize, chroma_height, mx&7, my&7);

    if(emu){
        ff_emulated_edge_mc(s->edge_emu_buffer, src_cr, s->uvlinesize, 9, 9/*FIXME*/, (mx>>3), (my>>3), s->width>>1, s->height>>1);
        src_cr= s->edge_emu_buffer;
    }
    chroma_op(dest_cr, src_cr, s->uvlinesize, chroma_height, mx&7, my&7);
}
1981
1982 static inline void mc_part(H264Context *h, int n, int square, int chroma_height, int delta,
1983 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1984 int x_offset, int y_offset,
1985 qpel_mc_func *qpix_put, h264_chroma_mc_func chroma_put,
1986 qpel_mc_func *qpix_avg, h264_chroma_mc_func chroma_avg,
1987 int list0, int list1){
1988 MpegEncContext * const s = &h->s;
1989 qpel_mc_func *qpix_op= qpix_put;
1990 h264_chroma_mc_func chroma_op= chroma_put;
1991
1992 dest_y += 2*x_offset + 2*y_offset*s-> linesize;
1993 dest_cb += x_offset + y_offset*s->uvlinesize;
1994 dest_cr += x_offset + y_offset*s->uvlinesize;
1995 x_offset += 8*s->mb_x;
1996 y_offset += 8*s->mb_y;
1997
1998 if(list0){
1999 Picture *ref= &h->ref_list[0][ h->ref_cache[0][ scan8[n] ] ];
2000 mc_dir_part(h, ref, n, square, chroma_height, delta, 0,
2001 dest_y, dest_cb, dest_cr, x_offset, y_offset,
2002 qpix_op, chroma_op);
2003
2004 qpix_op= qpix_avg;
2005 chroma_op= chroma_avg;
2006 }
2007
2008 if(list1){
2009 Picture *ref= &h->ref_list[1][ h->ref_cache[1][ scan8[n] ] ];
2010 mc_dir_part(h, ref, n, square, chroma_height, delta, 1,
2011 dest_y, dest_cb, dest_cr, x_offset, y_offset,
2012 qpix_op, chroma_op);
2013 }
2014 }
2015
/**
 * Motion compensates the whole macroblock, dispatching on the partition
 * layout stored in mb_type / sub_mb_type.
 *
 * The function-table indices 0/1/2 select progressively narrower MC
 * variants (16-, 8- and 4-pixel wide for luma qpel; the chroma tables are
 * indexed in parallel) -- NOTE(review): matches how they are paired with the
 * partition widths below; confirm against the dsputil table layout.
 */
static void hl_motion(H264Context *h, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                      qpel_mc_func (*qpix_put)[16], h264_chroma_mc_func (*chroma_put),
                      qpel_mc_func (*qpix_avg)[16], h264_chroma_mc_func (*chroma_avg)){
    MpegEncContext * const s = &h->s;
    const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
    const int mb_type= s->current_picture.mb_type[mb_xy];

    assert(IS_INTER(mb_type));

    if(IS_16X16(mb_type)){
        /* single 16x16 partition: square, one qpel call */
        mc_part(h, 0, 1, 8, 0, dest_y, dest_cb, dest_cr, 0, 0,
                qpix_put[0], chroma_put[0], qpix_avg[0], chroma_avg[0],
                IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1));
    }else if(IS_16X8(mb_type)){
        /* two 16x8 partitions: each done as two 8-wide calls side by side
         * (delta=8 pixels); chroma is 8x4 per partition */
        mc_part(h, 0, 0, 4, 8, dest_y, dest_cb, dest_cr, 0, 0,
                qpix_put[1], chroma_put[0], qpix_avg[1], chroma_avg[0],
                IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1));
        mc_part(h, 8, 0, 4, 8, dest_y, dest_cb, dest_cr, 0, 4,
                qpix_put[1], chroma_put[0], qpix_avg[1], chroma_avg[0],
                IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1));
    }else if(IS_8X16(mb_type)){
        /* two 8x16 partitions: two stacked 8-wide calls (delta = 8 lines) */
        mc_part(h, 0, 0, 8, 8*s->linesize, dest_y, dest_cb, dest_cr, 0, 0,
                qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
                IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1));
        mc_part(h, 4, 0, 8, 8*s->linesize, dest_y, dest_cb, dest_cr, 4, 0,
                qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
                IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1));
    }else{
        int i;

        assert(IS_8X8(mb_type));

        /* four 8x8 sub-partitions, each with its own sub_mb_type */
        for(i=0; i<4; i++){
            const int sub_mb_type= h->sub_mb_type[i];
            const int n= 4*i;                 /* first 4x4 block of this 8x8 */
            int x_offset= (i&1)<<2;           /* offsets in chroma samples */
            int y_offset= (i&2)<<1;

            if(IS_SUB_8X8(sub_mb_type)){
                mc_part(h, n, 1, 4, 0, dest_y, dest_cb, dest_cr, x_offset, y_offset,
                    qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
                    IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
            }else if(IS_SUB_8X4(sub_mb_type)){
                /* two 8x4 halves: each as two 4-wide calls (delta=4 pixels) */
                mc_part(h, n  , 0, 2, 4, dest_y, dest_cb, dest_cr, x_offset, y_offset,
                    qpix_put[2], chroma_put[1], qpix_avg[2], chroma_avg[1],
                    IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
                mc_part(h, n+2, 0, 2, 4, dest_y, dest_cb, dest_cr, x_offset, y_offset+2,
                    qpix_put[2], chroma_put[1], qpix_avg[2], chroma_avg[1],
                    IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
            }else if(IS_SUB_4X8(sub_mb_type)){
                /* two 4x8 halves: each as two stacked 4-wide calls (delta = 4 lines) */
                mc_part(h, n  , 0, 4, 4*s->linesize, dest_y, dest_cb, dest_cr, x_offset, y_offset,
                    qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
                    IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
                mc_part(h, n+1, 0, 4, 4*s->linesize, dest_y, dest_cb, dest_cr, x_offset+2, y_offset,
                    qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
                    IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
            }else{
                int j;
                assert(IS_SUB_4X4(sub_mb_type));
                /* four 4x4 blocks, each a single square call */
                for(j=0; j<4; j++){
                    int sub_x_offset= x_offset + 2*(j&1);
                    int sub_y_offset= y_offset +   (j&2);
                    mc_part(h, n+j, 1, 2, 0, dest_y, dest_cb, dest_cr, sub_x_offset, sub_y_offset,
                        qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
                        IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
                }
            }
        }
    }
}
2086
2087 static void decode_init_vlc(H264Context *h){
2088 static int done = 0;
2089
2090 if (!done) {
2091 int i;
2092 done = 1;
2093
2094 init_vlc(&chroma_dc_coeff_token_vlc, CHROMA_DC_COEFF_TOKEN_VLC_BITS, 4*5,
2095 &chroma_dc_coeff_token_len [0], 1, 1,
2096 &chroma_dc_coeff_token_bits[0], 1, 1, 1);
2097
2098 for(i=0; i<4; i++){
2099 init_vlc(&coeff_token_vlc[i], COEFF_TOKEN_VLC_BITS, 4*17,
2100 &coeff_token_len [i][0], 1, 1,
2101 &coeff_token_bits[i][0], 1, 1, 1);
2102 }
2103
2104 for(i=0; i<3; i++){
2105 init_vlc(&chroma_dc_total_zeros_vlc[i], CHROMA_DC_TOTAL_ZEROS_VLC_BITS, 4,
2106 &chroma_dc_total_zeros_len [i][0], 1, 1,
2107 &chroma_dc_total_zeros_bits[i][0], 1, 1, 1);
2108 }
2109 for(i=0; i<15; i++){
2110 init_vlc(&total_zeros_vlc[i], TOTAL_ZEROS_VLC_BITS, 16,
2111 &total_zeros_len [i][0], 1, 1,
2112 &total_zeros_bits[i][0], 1, 1, 1);
2113 }
2114
2115 for(i=0; i<6; i++){
2116 init_vlc(&run_vlc[i], RUN_VLC_BITS, 7,
2117 &run_len [i][0], 1, 1,
2118 &run_bits[i][0], 1, 1, 1);
2119 }
2120 init_vlc(&run7_vlc, RUN7_VLC_BITS, 16,
2121 &run_len [6][0], 1, 1,
2122 &run_bits[6][0], 1, 1, 1);
2123 }
2124 }
2125
2126 /**
2127 * Sets the intra prediction function pointers.
2128 */
2129 static void init_pred_ptrs(H264Context *h){
2130 // MpegEncContext * const s = &h->s;
2131
2132 h->pred4x4[VERT_PRED ]= pred4x4_vertical_c;
2133 h->pred4x4[HOR_PRED ]= pred4x4_horizontal_c;
2134 h->pred4x4[DC_PRED ]= pred4x4_dc_c;
2135 h->pred4x4[DIAG_DOWN_LEFT_PRED ]= pred4x4_down_left_c;
2136 h->pred4x4[DIAG_DOWN_RIGHT_PRED]= pred4x4_down_right_c;
2137 h->pred4x4[VERT_RIGHT_PRED ]= pred4x4_vertical_right_c;
2138 h->pred4x4[HOR_DOWN_PRED ]= pred4x4_horizontal_down_c;
2139 h->pred4x4[VERT_LEFT_PRED ]= pred4x4_vertical_left_c;
2140 h->pred4x4[HOR_UP_PRED ]= pred4x4_horizontal_up_c;
2141 h->pred4x4[LEFT_DC_PRED ]= pred4x4_left_dc_c;
2142 h->pred4x4[TOP_DC_PRED ]= pred4x4_top_dc_c;
2143 h->pred4x4[DC_128_PRED ]= pred4x4_128_dc_c;
2144
2145 h->pred8x8[DC_PRED8x8 ]= pred8x8_dc_c;
2146 h->pred8x8[VERT_PRED8x8 ]= pred8x8_vertical_c;
2147 h->pred8x8[HOR_PRED8x8 ]= pred8x8_horizontal_c;
2148 h->pred8x8[PLANE_PRED8x8 ]= pred8x8_plane_c;
2149 h->pred8x8[LEFT_DC_PRED8x8]= pred8x8_left_dc_c;
2150 h->pred8x8[TOP_DC_PRED8x8 ]= pred8x8_top_dc_c;
2151 h->pred8x8[DC_128_PRED8x8 ]= pred8x8_128_dc_c;
2152
2153 h->pred16x16[DC_PRED8x8 ]= pred16x16_dc_c;
2154 h->pred16x16[VERT_PRED8x8 ]= pred16x16_vertical_c;
2155 h->pred16x16[HOR_PRED8x8 ]= pred16x16_horizontal_c;
2156 h->pred16x16[PLANE_PRED8x8 ]= pred16x16_plane_c;
2157 h->pred16x16[LEFT_DC_PRED8x8]= pred16x16_left_dc_c;
2158 h->pred16x16[TOP_DC_PRED8x8 ]= pred16x16_top_dc_c;
2159 h->pred16x16[DC_128_PRED8x8 ]= pred16x16_128_dc_c;
2160 }
2161
2162 static void free_tables(H264Context *h){
2163 av_freep(&h->intra4x4_pred_mode);
2164 av_freep(&h->chroma_pred_mode_table);
2165 av_freep(&h->cbp_table);
2166 av_freep(&h->mvd_table[0]);
2167 av_freep(&h->mvd_table[1]);
2168 av_freep(&h->non_zero_count);
2169 av_freep(&h->slice_table_base);
2170 av_freep(&h->top_border);
2171 h->slice_table= NULL;
2172
2173 av_freep(&h->mb2b_xy);
2174 av_freep(&h->mb2b8_xy);
2175 }
2176
/**
 * allocates tables.
 * needs width/height
 */
static int alloc_tables(H264Context *h){
    MpegEncContext * const s = &h->s;
    /* one extra macroblock row so neighbour accesses past the last row stay in bounds */
    const int big_mb_num= s->mb_stride * (s->mb_height+1);
    int x,y;

    /* CHECKED_ALLOCZ jumps to "fail" below on allocation failure */
    CHECKED_ALLOCZ(h->intra4x4_pred_mode, big_mb_num * 8  * sizeof(uint8_t))

    CHECKED_ALLOCZ(h->non_zero_count    , big_mb_num * 16 * sizeof(uint8_t))
    CHECKED_ALLOCZ(h->slice_table_base  , big_mb_num * sizeof(uint8_t))
    CHECKED_ALLOCZ(h->top_border       , s->mb_width * (16+8+8) * sizeof(uint8_t))
    CHECKED_ALLOCZ(h->cbp_table, big_mb_num * sizeof(uint16_t))

    if( h->pps.cabac ) {
        /* CABAC additionally needs per-MB chroma prediction modes and MV deltas */
        CHECKED_ALLOCZ(h->chroma_pred_mode_table, big_mb_num * sizeof(uint8_t))
        CHECKED_ALLOCZ(h->mvd_table[0], 32*big_mb_num * sizeof(uint16_t));
        CHECKED_ALLOCZ(h->mvd_table[1], 32*big_mb_num * sizeof(uint16_t));
    }

    /* all entries start as -1 ("no slice"); slice_table is offset by one row
     * and one column so reads at (x-1, y-1) of the first row/column are valid */
    memset(h->slice_table_base, -1, big_mb_num * sizeof(uint8_t));
    h->slice_table= h->slice_table_base + s->mb_stride + 1;

    /* macroblock index -> motion vector (b) and 8x8 (b8) array indices */
    CHECKED_ALLOCZ(h->mb2b_xy  , big_mb_num * sizeof(uint16_t));
    CHECKED_ALLOCZ(h->mb2b8_xy , big_mb_num * sizeof(uint16_t));
    for(y=0; y<s->mb_height; y++){
        for(x=0; x<s->mb_width; x++){
            const int mb_xy= x + y*s->mb_stride;
            const int b_xy = 4*x + 4*y*h->b_stride;
            const int b8_xy= 2*x + 2*y*h->b8_stride;

            h->mb2b_xy [mb_xy]= b_xy;
            h->mb2b8_xy[mb_xy]= b8_xy;
        }
    }

    return 0;
fail:
    free_tables(h);
    return -1;
}
2220
2221 static void common_init(H264Context *h){
2222 MpegEncContext * const s = &h->s;
2223
2224 s->width = s->avctx->width;
2225 s->height = s->avctx->height;
2226 s->codec_id= s->avctx->codec->id;
2227
2228 init_pred_ptrs(h);
2229
2230 s->unrestricted_mv=1;
2231 s->decode=1; //FIXME
2232 }
2233
2234 static int decode_init(AVCodecContext *avctx){
2235 H264Context *h= avctx->priv_data;
2236 MpegEncContext * const s = &h->s;
2237
2238 MPV_decode_defaults(s);
2239
2240 s->avctx = avctx;
2241 common_init(h);
2242
2243 s->out_format = FMT_H264;
2244 s->workaround_bugs= avctx->workaround_bugs;
2245
2246 // set defaults
2247 // s->decode_mb= ff_h263_decode_mb;
2248 s->low_delay= 1;
2249 avctx->pix_fmt= PIX_FMT_YUV420P;
2250
2251 decode_init_vlc(h);
2252
2253 if(avctx->codec_tag != 0x31637661 && avctx->codec_tag != 0x31435641) // avc1
2254 h->is_avc = 0;
2255 else {
2256 if((avctx->extradata_size == 0) || (avctx->extradata == NULL)) {
2257 av_log(avctx, AV_LOG_ERROR, "AVC codec requires avcC data\n");
2258 return -1;
2259 }
2260 h->is_avc = 1;
2261 h->got_avcC = 0;
2262 }
2263
2264 return 0;
2265 }
2266
/**
 * Per-frame initialisation: starts the MPV frame and error resilience,
 * resets the MMCO list, and precomputes per-block byte offsets into the
 * picture planes (they depend on linesize, which is only valid now).
 */
static void frame_start(H264Context *h){
    MpegEncContext * const s = &h->s;
    int i;

    MPV_frame_start(s, s->avctx);
    ff_er_frame_start(s);
    h->mmco_index=0;

    assert(s->linesize && s->uvlinesize);

    /* (scan8[i]-scan8[0])&7 is the block's x, >>3 its y, in 4x4-block units;
     * block_offset is the luma byte offset of 4x4 block i within the MB,
     * chroma_subblock_offset the matching half-resolution chroma offset */
    for(i=0; i<16; i++){
        h->block_offset[i]= 4*((scan8[i] - scan8[0])&7) + 4*s->linesize*((scan8[i] - scan8[0])>>3);
        h->chroma_subblock_offset[i]= 2*((scan8[i] - scan8[0])&7) + 2*s->uvlinesize*((scan8[i] - scan8[0])>>3);
    }
    /* blocks 16..19 (cb) and 20..23 (cr) are the chroma 4x4 blocks */
    for(i=0; i<4; i++){
        h->block_offset[16+i]=
        h->block_offset[20+i]= 4*((scan8[i] - scan8[0])&7) + 4*s->uvlinesize*((scan8[i] - scan8[0])>>3);
    }

//    s->decode= (s->flags&CODEC_FLAG_PSNR) || !s->encoding || s->current_picture.reference /*|| h->contains_intra*/ || 1;
}
2288
/**
 * Saves the unfiltered bottom row (into top_border, for the MB below) and
 * rightmost column (into left_border, for the next MB in the row) of the
 * current macroblock, before the deblocking filter modifies them.
 *
 * top_border layout per MB: bytes 0..15 luma, 16..23 cb, 24..31 cr.
 */
static inline void backup_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize){
    MpegEncContext * const s = &h->s;
    int i;

    /* step one line up so row 16 below addresses the MB's last row */
    src_y -= linesize;
    src_cb -= uvlinesize;
    src_cr -= uvlinesize;

    /* left_border[0] is the top-left corner sample, taken from the border
     * saved for the current column before it gets overwritten below */
    h->left_border[0]= h->top_border[s->mb_x][15];
    for(i=1; i<17; i++){
        h->left_border[i]= src_y[15+i* linesize];   /* column 15: MB's right edge */
    }

    /* bottom luma row, copied as two 8-byte chunks */
    *(uint64_t*)(h->top_border[s->mb_x]+0)= *(uint64_t*)(src_y + 16*linesize);
    *(uint64_t*)(h->top_border[s->mb_x]+8)= *(uint64_t*)(src_y +8+16*linesize);

    if(!(s->flags&CODEC_FLAG_GRAY)){
        /* chroma corners and right columns at offsets 17 (cb) and 17+9 (cr) */
        h->left_border[17  ]= h->top_border[s->mb_x][16+7];
        h->left_border[17+9]= h->top_border[s->mb_x][24+7];
        for(i=1; i<9; i++){
            h->left_border[i+17  ]= src_cb[7+i*uvlinesize];
            h->left_border[i+17+9]= src_cr[7+i*uvlinesize];
        }
        /* bottom chroma rows */
        *(uint64_t*)(h->top_border[s->mb_x]+16)= *(uint64_t*)(src_cb+8*uvlinesize);
        *(uint64_t*)(h->top_border[s->mb_x]+24)= *(uint64_t*)(src_cr+8*uvlinesize);
    }
}
2316
/**
 * Exchanges the current macroblock's top/left neighbour samples with the
 * unfiltered copies stored in top_border/left_border.
 *
 * Called with xchg=1 before intra prediction so the prediction sees the
 * pre-deblocking neighbour samples, and with xchg=0 afterwards to restore
 * the deblocked values into the picture (see hl_decode_mb()).
 */
static inline void xchg_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int xchg){
    MpegEncContext * const s = &h->s;
    int temp8, i;
    uint64_t temp64;
    int deblock_left = (s->mb_x > 0);
    int deblock_top =  (s->mb_y > 0);

    /* step up and left onto the neighbouring border row/column */
    src_y -=   linesize + 1;
    src_cb -= uvlinesize + 1;
    src_cr -= uvlinesize + 1;

/* with xchg!=0 this swaps a and b; with xchg==0 it only copies a into b */
#define XCHG(a,b,t,xchg)\
t= a;\
if(xchg)\
a= b;\
b= t;

    if(deblock_left){
        for(i = !deblock_top; i<17; i++){
            XCHG(h->left_border[i     ], src_y [i*   linesize], temp8, xchg);
        }
    }

    if(deblock_top){
        XCHG(*(uint64_t*)(h->top_border[s->mb_x]+0), *(uint64_t*)(src_y +1), temp64, xchg);
        /* the right half is always swapped -- NOTE(review): presumably so the
         * samples used for the next MB's top-right prediction stay unfiltered;
         * confirm against the i==5 top_border use in hl_decode_mb() */
        XCHG(*(uint64_t*)(h->top_border[s->mb_x]+8), *(uint64_t*)(src_y +9), temp64, 1);
    }

    if(!(s->flags&CODEC_FLAG_GRAY)){
        if(deblock_left){
            for(i = !deblock_top; i<9; i++){
                XCHG(h->left_border[i+17  ], src_cb[i*uvlinesize], temp8, xchg);
                XCHG(h->left_border[i+17+9], src_cr[i*uvlinesize], temp8, xchg);
            }
        }
        if(deblock_top){
            XCHG(*(uint64_t*)(h->top_border[s->mb_x]+16), *(uint64_t*)(src_cb+1), temp64, 1);
            XCHG(*(uint64_t*)(h->top_border[s->mb_x]+24), *(uint64_t*)(src_cr+1), temp64, 1);
        }
    }
}
2358
2359 static void hl_decode_mb(H264Context *h){
2360 MpegEncContext * const s = &h->s;
2361 const int mb_x= s->mb_x;
2362 const int mb_y= s->mb_y;
2363 const int mb_xy= mb_x + mb_y*s->mb_stride;
2364 const int mb_type= s->current_picture.mb_type[mb_xy];
2365 uint8_t *dest_y, *dest_cb, *dest_cr;
2366 int linesize, uvlinesize /*dct_offset*/;
2367 int i;
2368
2369 if(!s->decode)
2370 return;
2371
2372 if(s->mb_skiped){
2373 }
2374
2375 dest_y = s->current_picture.data[0] + (mb_y * 16* s->linesize ) + mb_x * 16;
2376 dest_cb = s->current_picture.data[1] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
2377 dest_cr = s->current_picture.data[2] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
2378
2379 if (h->mb_field_decoding_flag) {
2380 linesize = s->linesize * 2;
2381 uvlinesize = s->uvlinesize * 2;
2382 if(mb_y&1){ //FIXME move out of this func?
2383 dest_y -= s->linesize*15;
2384 dest_cb-= s->linesize*7;
2385 dest_cr-= s->linesize*7;
2386 }
2387 } else {
2388 linesize = s->linesize;
2389 uvlinesize = s->uvlinesize;
2390 // dct_offset = s->linesize * 16;
2391 }
2392
2393 if(IS_INTRA(mb_type)){
2394 if(h->deblocking_filter)
2395 xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 1);
2396
2397 if(!(s->flags&CODEC_FLAG_GRAY)){
2398 h->pred8x8[ h->chroma_pred_mode ](dest_cb, uvlinesize);
2399 h->pred8x8[ h->chroma_pred_mode ](dest_cr, uvlinesize);
2400 }
2401
2402 if(IS_INTRA4x4(mb_type)){
2403 if(!s->encoding){
2404 for(i=0; i<16; i++){
2405 uint8_t * const ptr= dest_y + h->block_offset[i];
2406 uint8_t *topright;
2407 const int dir= h->intra4x4_pred_mode_cache[ scan8[i] ];
2408 int tr;
2409
2410 if(dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED){
2411 const int topright_avail= (h->topright_samples_available<<i)&0x8000;
2412 assert(mb_y || linesize <= h->block_offset[i]);
2413 if(!topright_avail){
2414 tr= ptr[3 - linesize]*0x01010101;
2415 topright= (uint8_t*) &tr;
2416 }else if(i==5 && h->deblocking_filter){
2417 tr= *(uint32_t*)h->top_border[mb_x+1];
2418 topright= (uint8_t*) &tr;
2419 }else
2420 topright= ptr + 4 - linesize;
2421 }else
2422 topright= NULL;
2423
2424 h->pred4x4[ dir ](ptr, topright, linesize);
2425 if(h->non_zero_count_cache[ scan8[i] ]){
2426 if(s->codec_id == CODEC_ID_H264)
2427 s->dsp.h264_idct_add(ptr, h->mb + i*16, linesize);
2428 else
2429 svq3_add_idct_c(ptr, h->mb + i*16, linesize, s->qscale, 0);
2430 }
2431 }
2432 }
2433 }else{
2434 h->pred16x16[ h->intra16x16_pred_mode ](dest_y , linesize);
2435 if(s->codec_id == CODEC_ID_H264)
2436 h264_luma_dc_dequant_idct_c(h->mb, s->qscale);
2437 else
2438 svq3_luma_dc_dequant_idct_c(h->mb, s->qscale);
2439 }
2440 if(h->deblocking_filter)
2441 xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 0);
2442 }else if(s->codec_id == CODEC_ID_H264){
2443 hl_motion(h, dest_y, dest_cb, dest_cr,
2444 s->dsp.put_h264_qpel_pixels_tab, s->dsp.put_h264_chroma_pixels_tab,
2445 s->dsp.avg_h264_qpel_pixels_tab, s->dsp.avg_h264_chroma_pixels_tab);
2446 }
2447
2448
2449 if(!IS_INTRA4x4(mb_type)){
2450 if(s->codec_id == CODEC_ID_H264){
2451 for(i=0; i<16; i++){
2452 if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){ //FIXME benchmark weird rule, & below
2453 uint8_t * const ptr= dest_y + h->block_offset[i];
2454 s->dsp.h264_idct_add(ptr, h->mb + i*16, linesize);
2455 }
2456 }
2457 }else{
2458 for(i=0; i<16; i++){
2459 if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){ //FIXME benchmark weird rule, & below
2460 uint8_t * const ptr= dest_y + h->block_offset[i];
2461 svq3_add_idct_c(ptr, h->mb + i*16, linesize, s->qscale, IS_INTRA(mb_type) ? 1 : 0);
2462 }
2463 }
2464 }
2465 }
2466
2467 if(!(s->flags&CODEC_FLAG_GRAY)){
2468 chroma_dc_dequant_idct_c(h->mb + 16*16, h->chroma_qp);
2469 chroma_dc_dequant_idct_c(h->mb + 16*16+4*16, h->chroma_qp);
2470 if(s->codec_id == CODEC_ID_H264){
2471 for(i=16; i<16+4; i++){
2472 if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){
2473 uint8_t * const ptr= dest_cb + h->block_offset[i];
2474 s->dsp.h264_idct_add(ptr, h->mb + i*16, uvlinesize);
2475 }
2476 }
2477 for(i=20; i<20+4; i++){
2478 if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){
2479 uint8_t * const ptr= dest_cr + h->block_offset[i];
2480 s->dsp.h264_idct_add(ptr, h->mb + i*16, uvlinesize);
2481 }
2482 }
2483 }else{
2484 for(i=16; i<16+4; i++){
2485 if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){
2486 uint8_t * const ptr= dest_cb + h->block_offset[i];
2487 svq3_add_idct_c(ptr, h->mb + i*16, uvlinesize, chroma_qp[s->qscale + 12] - 12, 2);
2488 }
2489 }
2490 for(i=20; i<20+4; i++){
2491 if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){
2492 uint8_t * const ptr= dest_cr + h->block_offset[i];
2493 svq3_add_idct_c(ptr, h->mb + i*16, uvlinesize, chroma_qp[s->qscale + 12] - 12, 2);
2494 }
2495 }
2496 }
2497 }
2498 if(h->deblocking_filter) {
2499 backup_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize);
2500 filter_mb(h, mb_x, mb_y, dest_y, dest_cb, dest_cr);
2501 }
2502 }
2503
2504 /**
2505 * fills the default_ref_list.
2506 */
2507 static int fill_default_ref_list(H264Context *h){
2508 MpegEncContext * const s = &h->s;
2509 int i;
2510 Picture sorted_short_ref[16];
2511
2512 if(h->slice_type==B_TYPE){
2513 int out_i;
2514 int limit= -1;
2515
2516 for(out_i=0; out_i<h->short_ref_count; out_i++){
2517 int best_i=-1;
2518 int best_poc=INT_MAX;
2519
2520 for(i=0; i<h->short_ref_count; i++){
2521 const int poc= h->short_ref[i]->poc;
2522 if(poc > limit && poc < best_poc){
2523 best_poc= poc;
2524 best_i= i;
2525 }
2526 }
2527
2528 assert(best_i != -1);
2529
2530 limit= best_poc;
2531 sorted_short_ref[out_i]= *h->short_ref[best_i];
2532 }
2533 }
2534
2535 if(s->picture_structure == PICT_FRAME){
2536 if(h->slice_type==B_TYPE){
2537 const int current_poc= s->current_picture_ptr->poc;
2538 int list;
2539
2540 for(list=0; list<2; list++){
2541 int index=0;
2542
2543 for(i=0; i<h->short_ref_count && index < h->ref_count[list]; i++){
2544 const int i2= list ? h->short_ref_count - i - 1 : i;
2545 const int poc= sorted_short_ref[i2].poc;
2546
2547 if(sorted_short_ref[i2].reference != 3) continue; //FIXME refernce field shit
2548
2549 if((list==1 && poc > current_poc) || (list==0 && poc < current_poc)){
2550 h->default_ref_list[list][index ]= sorted_short_ref[i2];
2551 h->default_ref_list[list][index++].pic_id= sorted_short_ref[i2].frame_num;
2552 }
2553 }
2554
2555 for(i=0; i<h->long_ref_count && index < h->ref_count[ list ]; i++){
2556 if(h->long_ref[i]->reference != 3) continue;
2557
2558 h->default_ref_list[ list ][index ]= *h->long_ref[i];
2559 h->default_ref_list[ list ][index++].pic_id= i;;
2560 }
2561
2562 if(h->long_ref_count > 1 && h->short_ref_count==0){
2563 Picture temp= h->default_ref_list[1][0];
2564 h->default_ref_list[1][0] = h->default_ref_list[1][1];
2565 h->default_ref_list[1][0] = temp;
2566 }
2567
2568 if(index < h->ref_count[ list ])
2569 memset(&h->default_ref_list[list][index], 0, sizeof(Picture)*(h->ref_count[ list ] - index));
2570 }
2571 }else{
2572 int index=0;
2573 for(i=0; i<h->short_ref_count && index < h->ref_count[0]; i++){
2574 if(h->short_ref[i]->reference != 3) continue; //FIXME refernce field shit
2575 h->default_ref_list[0][index ]= *h->short_ref[i];
2576 h->default_ref_list[0][index++].pic_id= h->short_ref[i]->frame_num;
2577 }
2578 for(i=0; i<h->long_ref_count && index < h->ref_count[0]; i++){
2579 if(h->long_ref[i]->reference != 3) continue;
2580 h->default_ref_list[0][index ]= *h->long_ref[i];
2581 h->default_ref_list[0][index++].pic_id= i;;
2582 }
2583 if(index < h->ref_count[0])
2584 memset(&h->default_ref_list[0][index], 0, sizeof(Picture)*(h->ref_count[0] - index));
2585 }
2586 }else{ //FIELD
2587 if(h->slice_type==B_TYPE){
2588 }else{
2589 //FIXME second field balh
2590 }
2591 }
2592 return 0;
2593 }
2594
/**
 * Decodes the ref_pic_list_reordering() syntax (H.264 spec 7.3.3.1) and
 * applies it to ref_list[], starting from the default list order.
 *
 * @return 0 on success, -1 on a bitstream error
 */
static int decode_ref_pic_list_reordering(H264Context *h){
    MpegEncContext * const s = &h->s;
    int list;

    if(h->slice_type==I_TYPE || h->slice_type==SI_TYPE) return 0; //FIXME move before func

    for(list=0; list<2; list++){
        /* start from the default order; reordering (if signalled) edits it in place */
        memcpy(h->ref_list[list], h->default_ref_list[list], sizeof(Picture)*h->ref_count[list]);

        if(get_bits1(&s->gb)){   /* ref_pic_list_reordering_flag */
            int pred= h->curr_pic_num;   /* picNumLXPred: prediction for short-term pic nums */
            int index;

            for(index=0; ; index++){
                /* 0/1: short-term pic num delta (subtract/add), 2: long-term idx, 3: end */
                int reordering_of_pic_nums_idc= get_ue_golomb(&s->gb);
                int pic_id;
                int i;

                if(reordering_of_pic_nums_idc==3)
                    break;

                if(index >= h->ref_count[list]){
                    av_log(h->s.avctx, AV_LOG_ERROR, "reference count overflow\n");
                    return -1;
                }

                if(reordering_of_pic_nums_idc<3){
                    if(reordering_of_pic_nums_idc<2){
                        const int abs_diff_pic_num= get_ue_golomb(&s->gb) + 1;

                        if(abs_diff_pic_num >= h->max_pic_num){
                            av_log(h->s.avctx, AV_LOG_ERROR, "abs_diff_pic_num overflow\n");
                            return -1;
                        }

                        /* deltas chain: each is relative to the previous pic num, modulo max_pic_num */
                        if(reordering_of_pic_nums_idc == 0) pred-= abs_diff_pic_num;
                        else                                pred+= abs_diff_pic_num;
                        pred &= h->max_pic_num - 1;

                        /* locate the short-term picture in the not-yet-fixed tail of the list */
                        for(i= h->ref_count[list]-1; i>=index; i--){
                            if(h->ref_list[list][i].pic_id == pred && h->ref_list[list][i].long_ref==0)
                                break;
                        }
                    }else{
                        pic_id= get_ue_golomb(&s->gb); //long_term_pic_idx

                        for(i= h->ref_count[list]-1; i>=index; i--){
                            if(h->ref_list[list][i].pic_id == pic_id && h->ref_list[list][i].long_ref==1)
                                break;
                        }
                    }

                    if(i < index){
                        /* the search above ran off the list: referenced picture is absent */
                        av_log(h->s.avctx, AV_LOG_ERROR, "reference picture missing during reorder\n");
                        memset(&h->ref_list[list][index], 0, sizeof(Picture)); //FIXME
                    }else if(i > index){
                        /* rotate the found picture into position index, shifting the rest down */
                        Picture tmp= h->ref_list[list][i];
                        for(; i>index; i--){
                            h->ref_list[list][i]= h->ref_list[list][i-1];
                        }
                        h->ref_list[list][index]= tmp;
                    }
                }else{
                    av_log(h->s.avctx, AV_LOG_ERROR, "illegal reordering_of_pic_nums_idc\n");
                    return -1;
                }
            }
        }

        /* only B slices have a second list */
        if(h->slice_type!=B_TYPE) break;
    }
    return 0;
}
2668
2669 static int pred_weight_table(H264Context *h){
2670 MpegEncContext * const s = &h->s;
2671 int list, i;
2672
2673 h->luma_log2_weight_denom= get_ue_golomb(&s->gb);
2674 h->chroma_log2_weight_denom= get_ue_golomb(&s->gb);
2675
2676 for(list=0; list<2; list++){
2677 for(i=0; i<h->ref_count[list]; i++){
2678 int luma_weight_flag, chroma_weight_flag;
2679
2680 luma_weight_flag= get_bits1(&s->gb);
2681 if(luma_weight_flag){
2682 h->luma_weight[list][i]= get_se_golomb(&s->gb);
2683 h->luma_offset[list][i]= get_se_golomb(&s->gb);
2684 }
2685
2686 chroma_weight_flag= get_bits1(&s->gb);