AVC (H264 in mp4 files, fourcc avc1) support
[libav.git] / libavcodec / h264.c
1 /*
2 * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
3 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 *
19 */
20
21 /**
22 * @file h264.c
23 * H.264 / AVC / MPEG4 part10 codec.
24 * @author Michael Niedermayer <michaelni@gmx.at>
25 */
26
27 #include "common.h"
28 #include "dsputil.h"
29 #include "avcodec.h"
30 #include "mpegvideo.h"
31 #include "h264data.h"
32 #include "golomb.h"
33
34 #include "cabac.h"
35
36 #undef NDEBUG
37 #include <assert.h>
38
//these MpegEncContext fields are renamed to an obviously-wrong identifier so
//that any accidental use in the h264 code fails to compile
#define interlaced_dct interlaced_dct_is_a_bad_name
#define mb_intra mb_intra_isnt_initalized_see_mb_type

//cache indices of the luma/chroma DC blocks (after the 16+8 AC blocks)
#define LUMA_DC_BLOCK_INDEX   25
#define CHROMA_DC_BLOCK_INDEX 26

//bits used for the first step of the multi-step VLC table lookups below
#define CHROMA_DC_COEFF_TOKEN_VLC_BITS 8
#define COEFF_TOKEN_VLC_BITS           8
#define TOTAL_ZEROS_VLC_BITS           9
#define CHROMA_DC_TOTAL_ZEROS_VLC_BITS 3
#define RUN_VLC_BITS                   3
#define RUN7_VLC_BITS                  6

//maximum number of parameter sets kept in sps_buffer/pps_buffer
#define MAX_SPS_COUNT 32
#define MAX_PPS_COUNT 256

//size of the H264Context.mmco array
#define MAX_MMCO_COUNT 66
56
/**
 * Sequence parameter set.
 * Field names without a ///< comment match the syntax element names of the
 * H.264 bitstream directly.
 */
typedef struct SPS{

    int profile_idc;
    int level_idc;
    int log2_max_frame_num; ///< log2_max_frame_num_minus4 + 4
    int poc_type; ///< pic_order_cnt_type
    int log2_max_poc_lsb; ///< log2_max_pic_order_cnt_lsb_minus4
    int delta_pic_order_always_zero_flag;
    int offset_for_non_ref_pic;
    int offset_for_top_to_bottom_field;
    int poc_cycle_length; ///< num_ref_frames_in_pic_order_cnt_cycle
    int ref_frame_count; ///< num_ref_frames
    int gaps_in_frame_num_allowed_flag;
    int mb_width; ///< frame_width_in_mbs_minus1 + 1
    int mb_height; ///< frame_height_in_mbs_minus1 + 1
    int frame_mbs_only_flag;
    int mb_aff; ///<mb_adaptive_frame_field_flag
    int direct_8x8_inference_flag;
    int crop; ///< frame_cropping_flag
    int crop_left; ///< frame_cropping_rect_left_offset
    int crop_right; ///< frame_cropping_rect_right_offset
    int crop_top; ///< frame_cropping_rect_top_offset
    int crop_bottom; ///< frame_cropping_rect_bottom_offset
    int vui_parameters_present_flag;
    AVRational sar; ///< sample aspect ratio (from the VUI)
    int timing_info_present_flag;
    uint32_t num_units_in_tick;
    uint32_t time_scale;
    int fixed_frame_rate_flag;
    short offset_for_ref_frame[256]; //FIXME allocate dynamically? (sized for the max poc_cycle_length)
}SPS;
91
/**
 * Picture parameter set.
 * Field names without a ///< comment match the syntax element names of the
 * H.264 bitstream directly.
 */
typedef struct PPS{
    int sps_id; ///< index of the SPS this PPS refers to
    int cabac; ///< entropy_coding_mode_flag
    int pic_order_present; ///< pic_order_present_flag
    int slice_group_count; ///< num_slice_groups_minus1 + 1
    int mb_slice_group_map_type;
    int ref_count[2]; ///< num_ref_idx_l0/1_active_minus1 + 1
    int weighted_pred; ///< weighted_pred_flag
    int weighted_bipred_idc;
    int init_qp; ///< pic_init_qp_minus26 + 26
    int init_qs; ///< pic_init_qs_minus26 + 26
    int chroma_qp_index_offset;
    int deblocking_filter_parameters_present; ///< deblocking_filter_parameters_present_flag
    int constrained_intra_pred; ///< constrained_intra_pred_flag
    int redundant_pic_cnt_present; ///< redundant_pic_cnt_present_flag
}PPS;
111
/**
 * Memory management control operation opcode.
 * The enumerator order encodes the memory_management_control_operation
 * values of the bitstream, so MMCO_END must stay 0 and the order must
 * not be changed.
 */
typedef enum MMCOOpcode{
    MMCO_END=0,
    MMCO_SHORT2UNUSED,
    MMCO_LONG2UNUSED,
    MMCO_SHORT2LONG,
    MMCO_SET_MAX_LONG,
    MMCO_RESET,
    MMCO_LONG,
} MMCOOpcode;
124
/**
 * Memory management control operation.
 */
typedef struct MMCO{
    MMCOOpcode opcode;
    int short_frame_num; ///< frame_num of the short term ref the op applies to
    int long_index;      ///< long term index the op applies to
} MMCO;
133
/**
 * H264Context — complete per-instance decoder state.
 * The *_cache arrays hold, per macroblock being decoded, the current MB's
 * data plus a border of neighbor data, addressed via scan8[] offsets.
 */
typedef struct H264Context{
    MpegEncContext s;
    int nal_ref_idc;
    int nal_unit_type;
//NAL unit type codes (values from the bitstream, do not renumber)
#define NAL_SLICE 1
#define NAL_DPA 2
#define NAL_DPB 3
#define NAL_DPC 4
#define NAL_IDR_SLICE 5
#define NAL_SEI 6
#define NAL_SPS 7
#define NAL_PPS 8
#define NAL_PICTURE_DELIMITER 9
#define NAL_FILTER_DATA 10
    uint8_t *rbsp_buffer;      ///< buffer for the unescaped RBSP
    int rbsp_buffer_size;

    /**
     * Used to parse AVC variant of h264
     */
    int is_avc; ///< this flag is != 0 if codec is avc1
    int got_avcC; ///< flag used to parse avcC data only once
    int nal_length_size; ///< Number of bytes used for nal length (1, 2 or 4)

    int chroma_qp; //QPc

    int prev_mb_skiped; //FIXME remove (IMHO not used)

    //prediction stuff
    int chroma_pred_mode;
    int intra16x16_pred_mode;

    int8_t intra4x4_pred_mode_cache[5*8];
    int8_t (*intra4x4_pred_mode)[8];
    void (*pred4x4 [9+3])(uint8_t *src, uint8_t *topright, int stride);//FIXME move to dsp?
    void (*pred8x8 [4+3])(uint8_t *src, int stride);
    void (*pred16x16[4+3])(uint8_t *src, int stride);
    //availability bitmasks for the neighboring samples, one bit per 4x4 block
    unsigned int topleft_samples_available;
    unsigned int top_samples_available;
    unsigned int topright_samples_available;
    unsigned int left_samples_available;
    uint8_t (*top_border)[16+2*8];
    uint8_t left_border[17+2*9];

    /**
     * non zero coeff count cache.
     * is 64 if not available.
     */
    uint8_t non_zero_count_cache[6*8];
    uint8_t (*non_zero_count)[16];

    /**
     * Motion vector cache.
     * 5x8 grid: current MB's 4x4 blocks plus a border of neighbor entries.
     */
    int16_t mv_cache[2][5*8][2];
    int8_t ref_cache[2][5*8];
#define LIST_NOT_USED -1 //FIXME rename?
#define PART_NOT_AVAILABLE -2

    /**
     * is 1 if the specific list MV&references are set to 0,0,-2.
     */
    int mv_cache_clean[2];

    int block_offset[16+8];
    int chroma_subblock_offset[16]; //FIXME remove

    uint16_t *mb2b_xy; //FIXME are these 4 a good idea?
    uint16_t *mb2b8_xy;
    int b_stride;
    int b8_stride;

    //SVQ3 specific fields
    int halfpel_flag;
    int thirdpel_flag;

    int unknown_svq3_flag;
    int next_slice_index;

    SPS sps_buffer[MAX_SPS_COUNT];
    SPS sps; ///< current sps

    PPS pps_buffer[MAX_PPS_COUNT];
    /**
     * current pps
     */
    PPS pps; //FIXME move tp Picture perhaps? (->no) do we need that?

    int slice_num;
    uint8_t *slice_table_base;
    uint8_t *slice_table; ///< slice_table_base + mb_stride + 1
    int slice_type;
    int slice_type_fixed;

    //interlacing specific flags
    int mb_field_decoding_flag;

    int sub_mb_type[4];

    //POC stuff
    int poc_lsb;
    int poc_msb;
    int delta_poc_bottom;
    int delta_poc[2];
    int frame_num;
    int prev_poc_msb; ///< poc_msb of the last reference pic for POC type 0
    int prev_poc_lsb; ///< poc_lsb of the last reference pic for POC type 0
    int frame_num_offset; ///< for POC type 2
    int prev_frame_num_offset; ///< for POC type 2
    int prev_frame_num; ///< frame_num of the last pic for POC type 1/2

    /**
     * frame_num for frames or 2*frame_num for field pics.
     */
    int curr_pic_num;

    /**
     * max_frame_num or 2*max_frame_num for field pics.
     */
    int max_pic_num;

    //Weighted pred stuff
    int luma_log2_weight_denom;
    int chroma_log2_weight_denom;
    int luma_weight[2][16];
    int luma_offset[2][16];
    int chroma_weight[2][16][2];
    int chroma_offset[2][16][2];

    //deblock
    int deblocking_filter; ///< disable_deblocking_filter_idc with 1<->0
    int slice_alpha_c0_offset;
    int slice_beta_offset;

    int redundant_pic_count;

    int direct_spatial_mv_pred;

    /**
     * num_ref_idx_l0/1_active_minus1 + 1
     */
    int ref_count[2];// FIXME split for AFF
    Picture *short_ref[16];
    Picture *long_ref[16];
    Picture default_ref_list[2][32];
    Picture ref_list[2][32]; //FIXME size?
    Picture field_ref_list[2][32]; //FIXME size?

    /**
     * memory management control operations buffer.
     */
    MMCO mmco[MAX_MMCO_COUNT];
    int mmco_index;

    int long_ref_count; ///< number of actual long term references
    int short_ref_count; ///< number of actual short term references

    //data partitioning
    GetBitContext intra_gb;
    GetBitContext inter_gb;
    GetBitContext *intra_gb_ptr;
    GetBitContext *inter_gb_ptr;

    //16 coefficients for each of the 24 blocks — presumably 16 luma + 8 chroma; verify
    DCTELEM mb[16*24] __align8;

    /**
     * Cabac
     */
    CABACContext cabac;
    uint8_t cabac_state[399];
    int cabac_init_idc;

    /* 0x100 -> non null luma_dc, 0x80/0x40 -> non null chroma_dc (cb/cr), 0x?0 -> chroma_cbp(0,1,2), 0x0? luma_cbp */
    uint16_t *cbp_table;
    /* chroma_pred_mode for i4x4 or i16x16, else 0 */
    uint8_t *chroma_pred_mode_table;
    int last_qscale_diff;
    int16_t (*mvd_table[2])[2]; ///< per-4x4-block motion vector differences (CABAC)
    int16_t mvd_cache[2][5*8][2];

}H264Context;
317
//VLC tables for residual coefficient decoding (coeff_token, total_zeros,
//run_before); lookup widths are the *_VLC_BITS defines above
static VLC coeff_token_vlc[4];
static VLC chroma_dc_coeff_token_vlc;

static VLC total_zeros_vlc[15];
static VLC chroma_dc_total_zeros_vlc[3];

static VLC run_vlc[6];
static VLC run7_vlc;

//forward declarations
static void svq3_luma_dc_dequant_idct_c(DCTELEM *block, int qp);
static void svq3_add_idct_c(uint8_t *dst, DCTELEM *block, int stride, int qp, int dc);
static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr);
330
/**
 * Packs two 16 bit values into one 32 bit word; which argument ends up
 * in the high half depends on the host byte order.
 */
static inline uint32_t pack16to32(int a, int b){
    uint32_t low_half, high_half;
#ifdef WORDS_BIGENDIAN
    low_half = b&0xFFFF;
    high_half= a;
#else
    low_half = a&0xFFFF;
    high_half= b;
#endif
    return low_half + (high_half<<16);
}
338
/**
 * Fills a rectangle with a constant value.
 * @param vp top left corner of the rectangle
 * @param w width of the rectangle, should be a constant
 * @param h height of the rectangle, should be a constant
 * @param stride distance between rows, in units of the element size
 * @param val the fill value
 * @param size the size of val (1 or 4), should be a constant
 */
static inline void fill_rectangle(void *vp, int w, int h, int stride, uint32_t val, int size){ //FIXME ensure this IS inlined
    uint8_t *p= (uint8_t*)vp;
    //precompute val replicated to 16/32/64 bit store widths
    const uint16_t v2= size==4 ? val : val*0x0101;
    const uint32_t v4= size==4 ? val : val*0x01010101;
    const uint64_t v8= size==4 ? val*0x0100000001ULL : val*0x0101010101010101ULL;

    assert(size==1 || size==4);

    w      *= size;
    stride *= size;

    //FIXME check what gcc generates for 64 bit on x86 and possible write a 32 bit ver of it
    if(w==2 && h==2){
        *(uint16_t*)(p + 0*stride)= v2;
        *(uint16_t*)(p + 1*stride)= v2;
    }else if(w==2 && h==4){
        *(uint16_t*)(p + 0*stride)= v2;
        *(uint16_t*)(p + 1*stride)= v2;
        *(uint16_t*)(p + 2*stride)= v2;
        *(uint16_t*)(p + 3*stride)= v2;
    }else if(w==4 && h==1){
        *(uint32_t*)(p + 0*stride)= v4;
    }else if(w==4 && h==2){
        *(uint32_t*)(p + 0*stride)= v4;
        *(uint32_t*)(p + 1*stride)= v4;
    }else if(w==4 && h==4){
        *(uint32_t*)(p + 0*stride)= v4;
        *(uint32_t*)(p + 1*stride)= v4;
        *(uint32_t*)(p + 2*stride)= v4;
        *(uint32_t*)(p + 3*stride)= v4;
    }else if(w==8 && h==1){
        *(uint32_t*)(p + 0)= v4;
        *(uint32_t*)(p + 4)= v4;
    }else if(w==8 && h==2){
        *(uint32_t*)(p + 0 + 0*stride)= v4;
        *(uint32_t*)(p + 4 + 0*stride)= v4;
        *(uint32_t*)(p + 0 + 1*stride)= v4;
        *(uint32_t*)(p + 4 + 1*stride)= v4;
    }else if(w==8 && h==4){
        *(uint64_t*)(p + 0*stride)= v8;
        *(uint64_t*)(p + 1*stride)= v8;
        *(uint64_t*)(p + 2*stride)= v8;
        *(uint64_t*)(p + 3*stride)= v8;
    }else if(w==16 && h==2){
        *(uint64_t*)(p + 0 + 0*stride)= v8;
        *(uint64_t*)(p + 8 + 0*stride)= v8;
        *(uint64_t*)(p + 0 + 1*stride)= v8;
        *(uint64_t*)(p + 8 + 1*stride)= v8;
    }else if(w==16 && h==4){
        int y;
        for(y=0; y<4; y++){
            *(uint64_t*)(p + 0 + y*stride)= v8;
            *(uint64_t*)(p + 8 + y*stride)= v8;
        }
    }else
        assert(0);
}
401
/**
 * Fills the per-macroblock caches from the already decoded neighboring
 * macroblocks: sample availability masks, intra4x4 prediction modes,
 * non zero coefficient counts, motion vectors, reference indices and,
 * when CABAC is used, motion vector differences.
 * @param mb_type type of the current macroblock
 */
static inline void fill_caches(H264Context *h, int mb_type){
    MpegEncContext * const s = &h->s;
    const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
    int topleft_xy, top_xy, topright_xy, left_xy[2];
    int topleft_type, top_type, topright_type, left_type[2];
    int left_block[4];
    int i;

    //wow what a mess, why didnt they simplify the interlacing&intra stuff, i cant imagine that these complex rules are worth it

    if(h->sps.mb_aff){
    //FIXME mb_aff neighbor derivation not implemented yet
        topleft_xy = 0; /* avoid warning */
        top_xy = 0; /* avoid warning */
        topright_xy = 0; /* avoid warning */
    }else{
        //frame coding: both left halves come from the same macroblock
        topleft_xy = mb_xy-1 - s->mb_stride;
        top_xy = mb_xy - s->mb_stride;
        topright_xy= mb_xy+1 - s->mb_stride;
        left_xy[0] = mb_xy-1;
        left_xy[1] = mb_xy-1;
        left_block[0]= 0;
        left_block[1]= 1;
        left_block[2]= 2;
        left_block[3]= 3;
    }

    //neighbors belonging to a different slice are treated as unavailable (type 0)
    topleft_type = h->slice_table[topleft_xy ] == h->slice_num ? s->current_picture.mb_type[topleft_xy] : 0;
    top_type     = h->slice_table[top_xy     ] == h->slice_num ? s->current_picture.mb_type[top_xy]    : 0;
    topright_type= h->slice_table[topright_xy] == h->slice_num ? s->current_picture.mb_type[topright_xy]: 0;
    left_type[0] = h->slice_table[left_xy[0] ] == h->slice_num ? s->current_picture.mb_type[left_xy[0]] : 0;
    left_type[1] = h->slice_table[left_xy[1] ] == h->slice_num ? s->current_picture.mb_type[left_xy[1]] : 0;

    if(IS_INTRA(mb_type)){
        //start from "everything available" and clear bits for missing neighbors
        h->topleft_samples_available=
        h->top_samples_available=
        h->left_samples_available= 0xFFFF;
        h->topright_samples_available= 0xEEEA;

        if(!IS_INTRA(top_type) && (top_type==0 || h->pps.constrained_intra_pred)){
            h->topleft_samples_available= 0xB3FF;
            h->top_samples_available= 0x33FF;
            h->topright_samples_available= 0x26EA;
        }
        for(i=0; i<2; i++){
            if(!IS_INTRA(left_type[i]) && (left_type[i]==0 || h->pps.constrained_intra_pred)){
                h->topleft_samples_available&= 0xDF5F;
                h->left_samples_available&= 0x5F5F;
            }
        }

        if(!IS_INTRA(topleft_type) && (topleft_type==0 || h->pps.constrained_intra_pred))
            h->topleft_samples_available&= 0x7FFF;

        if(!IS_INTRA(topright_type) && (topright_type==0 || h->pps.constrained_intra_pred))
            h->topright_samples_available&= 0xFBFF;

        if(IS_INTRA4x4(mb_type)){
            if(IS_INTRA4x4(top_type)){
                //indices 4,5,6,3 hold the neighbor's bottom row left to right
                //(see write_back_intra_pred_mode for the storage layout)
                h->intra4x4_pred_mode_cache[4+8*0]= h->intra4x4_pred_mode[top_xy][4];
                h->intra4x4_pred_mode_cache[5+8*0]= h->intra4x4_pred_mode[top_xy][5];
                h->intra4x4_pred_mode_cache[6+8*0]= h->intra4x4_pred_mode[top_xy][6];
                h->intra4x4_pred_mode_cache[7+8*0]= h->intra4x4_pred_mode[top_xy][3];
            }else{
                int pred;
                //2 = DC mode for a usable non-intra4x4 neighbor, -1 = unavailable
                if(IS_INTRA16x16(top_type) || (IS_INTER(top_type) && !h->pps.constrained_intra_pred))
                    pred= 2;
                else{
                    pred= -1;
                }
                h->intra4x4_pred_mode_cache[4+8*0]=
                h->intra4x4_pred_mode_cache[5+8*0]=
                h->intra4x4_pred_mode_cache[6+8*0]=
                h->intra4x4_pred_mode_cache[7+8*0]= pred;
            }
            for(i=0; i<2; i++){
                if(IS_INTRA4x4(left_type[i])){
                    h->intra4x4_pred_mode_cache[3+8*1 + 2*8*i]= h->intra4x4_pred_mode[left_xy[i]][left_block[0+2*i]];
                    h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= h->intra4x4_pred_mode[left_xy[i]][left_block[1+2*i]];
                }else{
                    int pred;
                    if(IS_INTRA16x16(left_type[i]) || (IS_INTER(left_type[i]) && !h->pps.constrained_intra_pred))
                        pred= 2;
                    else{
                        pred= -1;
                    }
                    h->intra4x4_pred_mode_cache[3+8*1 + 2*8*i]=
                    h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= pred;
                }
            }
        }
    }


/*
0 . T T. T T T T
1 L . .L . . . .
2 L . .L . . . .
3 . T TL . . . .
4 L . .L . . . .
5 L . .. . . . .
*/
//FIXME constraint_intra_pred & partitioning & nnz (lets hope this is just a typo in the spec)
    if(top_type){
        h->non_zero_count_cache[4+8*0]= h->non_zero_count[top_xy][0];
        h->non_zero_count_cache[5+8*0]= h->non_zero_count[top_xy][1];
        h->non_zero_count_cache[6+8*0]= h->non_zero_count[top_xy][2];
        h->non_zero_count_cache[7+8*0]= h->non_zero_count[top_xy][3];

        h->non_zero_count_cache[1+8*0]= h->non_zero_count[top_xy][7];
        h->non_zero_count_cache[2+8*0]= h->non_zero_count[top_xy][8];

        h->non_zero_count_cache[1+8*3]= h->non_zero_count[top_xy][10];
        h->non_zero_count_cache[2+8*3]= h->non_zero_count[top_xy][11];
    }else{
        //64 marks "not available" (see non_zero_count_cache doc)
        h->non_zero_count_cache[4+8*0]=
        h->non_zero_count_cache[5+8*0]=
        h->non_zero_count_cache[6+8*0]=
        h->non_zero_count_cache[7+8*0]=

        h->non_zero_count_cache[1+8*0]=
        h->non_zero_count_cache[2+8*0]=

        h->non_zero_count_cache[1+8*3]=
        h->non_zero_count_cache[2+8*3]= 64;
    }

    if(left_type[0]){
        h->non_zero_count_cache[3+8*1]= h->non_zero_count[left_xy[0]][6];
        h->non_zero_count_cache[3+8*2]= h->non_zero_count[left_xy[0]][5];
        h->non_zero_count_cache[0+8*1]= h->non_zero_count[left_xy[0]][9]; //FIXME left_block
        h->non_zero_count_cache[0+8*4]= h->non_zero_count[left_xy[0]][12];
    }else{
        h->non_zero_count_cache[3+8*1]=
        h->non_zero_count_cache[3+8*2]=
        h->non_zero_count_cache[0+8*1]=
        h->non_zero_count_cache[0+8*4]= 64;
    }

    if(left_type[1]){
        h->non_zero_count_cache[3+8*3]= h->non_zero_count[left_xy[1]][4];
        h->non_zero_count_cache[3+8*4]= h->non_zero_count[left_xy[1]][3];
        h->non_zero_count_cache[0+8*2]= h->non_zero_count[left_xy[1]][8];
        h->non_zero_count_cache[0+8*5]= h->non_zero_count[left_xy[1]][11];
    }else{
        h->non_zero_count_cache[3+8*3]=
        h->non_zero_count_cache[3+8*4]=
        h->non_zero_count_cache[0+8*2]=
        h->non_zero_count_cache[0+8*5]= 64;
    }

#if 1
    if(IS_INTER(mb_type)){
        int list;
        for(list=0; list<2; list++){
            if((!IS_8X8(mb_type)) && !USES_LIST(mb_type, list)){
                /*if(!h->mv_cache_clean[list]){
                    memset(h->mv_cache [list], 0, 8*5*2*sizeof(int16_t)); //FIXME clean only input? clean at all?
                    memset(h->ref_cache[list], PART_NOT_AVAILABLE, 8*5*sizeof(int8_t));
                    h->mv_cache_clean[list]= 1;
                }*/
                continue; //FIXME direct mode ...
            }
            h->mv_cache_clean[list]= 0;

            //topleft neighbor: bottom-right 4x4 block
            if(IS_INTER(topleft_type)){
                const int b_xy = h->mb2b_xy[topleft_xy] + 3 + 3*h->b_stride;
                const int b8_xy= h->mb2b8_xy[topleft_xy] + 1 + h->b8_stride;
                *(uint32_t*)h->mv_cache[list][scan8[0] - 1 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy];
                h->ref_cache[list][scan8[0] - 1 - 1*8]= s->current_picture.ref_index[list][b8_xy];
            }else{
                *(uint32_t*)h->mv_cache[list][scan8[0] - 1 - 1*8]= 0;
                h->ref_cache[list][scan8[0] - 1 - 1*8]= topleft_type ? LIST_NOT_USED : PART_NOT_AVAILABLE;
            }

            //top neighbor: bottom row of 4x4 blocks
            if(IS_INTER(top_type)){
                const int b_xy= h->mb2b_xy[top_xy] + 3*h->b_stride;
                const int b8_xy= h->mb2b8_xy[top_xy] + h->b8_stride;
                *(uint32_t*)h->mv_cache[list][scan8[0] + 0 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 0];
                *(uint32_t*)h->mv_cache[list][scan8[0] + 1 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 1];
                *(uint32_t*)h->mv_cache[list][scan8[0] + 2 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 2];
                *(uint32_t*)h->mv_cache[list][scan8[0] + 3 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 3];
                h->ref_cache[list][scan8[0] + 0 - 1*8]=
                h->ref_cache[list][scan8[0] + 1 - 1*8]= s->current_picture.ref_index[list][b8_xy + 0];
                h->ref_cache[list][scan8[0] + 2 - 1*8]=
                h->ref_cache[list][scan8[0] + 3 - 1*8]= s->current_picture.ref_index[list][b8_xy + 1];
            }else{
                *(uint32_t*)h->mv_cache [list][scan8[0] + 0 - 1*8]=
                *(uint32_t*)h->mv_cache [list][scan8[0] + 1 - 1*8]=
                *(uint32_t*)h->mv_cache [list][scan8[0] + 2 - 1*8]=
                *(uint32_t*)h->mv_cache [list][scan8[0] + 3 - 1*8]= 0;
                *(uint32_t*)&h->ref_cache[list][scan8[0] + 0 - 1*8]= ((top_type ? LIST_NOT_USED : PART_NOT_AVAILABLE)&0xFF)*0x01010101;
            }

            //topright neighbor: bottom-left 4x4 block
            if(IS_INTER(topright_type)){
                const int b_xy= h->mb2b_xy[topright_xy] + 3*h->b_stride;
                const int b8_xy= h->mb2b8_xy[topright_xy] + h->b8_stride;
                *(uint32_t*)h->mv_cache[list][scan8[0] + 4 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy];
                h->ref_cache[list][scan8[0] + 4 - 1*8]= s->current_picture.ref_index[list][b8_xy];
            }else{
                *(uint32_t*)h->mv_cache [list][scan8[0] + 4 - 1*8]= 0;
                h->ref_cache[list][scan8[0] + 4 - 1*8]= topright_type ? LIST_NOT_USED : PART_NOT_AVAILABLE;
            }

            //FIXME unify cleanup or sth
            //left neighbor, upper half
            if(IS_INTER(left_type[0])){
                const int b_xy= h->mb2b_xy[left_xy[0]] + 3;
                const int b8_xy= h->mb2b8_xy[left_xy[0]] + 1;
                *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 0*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[0]];
                *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[1]];
                h->ref_cache[list][scan8[0] - 1 + 0*8]=
                h->ref_cache[list][scan8[0] - 1 + 1*8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[0]>>1)];
            }else{
                *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 0*8]=
                *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 1*8]= 0;
                h->ref_cache[list][scan8[0] - 1 + 0*8]=
                h->ref_cache[list][scan8[0] - 1 + 1*8]= left_type[0] ? LIST_NOT_USED : PART_NOT_AVAILABLE;
            }

            //left neighbor, lower half
            if(IS_INTER(left_type[1])){
                const int b_xy= h->mb2b_xy[left_xy[1]] + 3;
                const int b8_xy= h->mb2b8_xy[left_xy[1]] + 1;
                *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 2*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[2]];
                *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 3*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[3]];
                h->ref_cache[list][scan8[0] - 1 + 2*8]=
                h->ref_cache[list][scan8[0] - 1 + 3*8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[2]>>1)];
            }else{
                *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 2*8]=
                *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 3*8]= 0;
                h->ref_cache[list][scan8[0] - 1 + 2*8]=
                //NOTE(review): left_type[0] here looks like it should be left_type[1];
                //harmless while left_xy[0]==left_xy[1] (non mb_aff path above) — verify
                h->ref_cache[list][scan8[0] - 1 + 3*8]= left_type[0] ? LIST_NOT_USED : PART_NOT_AVAILABLE;
            }

            h->ref_cache[list][scan8[5 ]+1] =
            h->ref_cache[list][scan8[7 ]+1] =
            h->ref_cache[list][scan8[13]+1] = //FIXME remove past 3 (init somewher else)
            h->ref_cache[list][scan8[4 ]] =
            h->ref_cache[list][scan8[12]] = PART_NOT_AVAILABLE;
            *(uint32_t*)h->mv_cache [list][scan8[5 ]+1]=
            *(uint32_t*)h->mv_cache [list][scan8[7 ]+1]=
            *(uint32_t*)h->mv_cache [list][scan8[13]+1]= //FIXME remove past 3 (init somewher else)
            *(uint32_t*)h->mv_cache [list][scan8[4 ]]=
            *(uint32_t*)h->mv_cache [list][scan8[12]]= 0;

            if( h->pps.cabac ) {
                /* XXX beurk, Load mvd */
                if(IS_INTER(topleft_type)){
                    const int b_xy = h->mb2b_xy[topleft_xy] + 3 + 3*h->b_stride;
                    *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy];
                }else{
                    *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 - 1*8]= 0;
                }

                if(IS_INTER(top_type)){
                    const int b_xy= h->mb2b_xy[top_xy] + 3*h->b_stride;
                    *(uint32_t*)h->mvd_cache[list][scan8[0] + 0 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 0];
                    *(uint32_t*)h->mvd_cache[list][scan8[0] + 1 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 1];
                    *(uint32_t*)h->mvd_cache[list][scan8[0] + 2 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 2];
                    *(uint32_t*)h->mvd_cache[list][scan8[0] + 3 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 3];
                }else{
                    *(uint32_t*)h->mvd_cache [list][scan8[0] + 0 - 1*8]=
                    *(uint32_t*)h->mvd_cache [list][scan8[0] + 1 - 1*8]=
                    *(uint32_t*)h->mvd_cache [list][scan8[0] + 2 - 1*8]=
                    *(uint32_t*)h->mvd_cache [list][scan8[0] + 3 - 1*8]= 0;
                }
                if(IS_INTER(left_type[0])){
                    const int b_xy= h->mb2b_xy[left_xy[0]] + 3;
                    *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 0*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[0]];
                    *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[1]];
                }else{
                    *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 0*8]=
                    *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 1*8]= 0;
                }
                if(IS_INTER(left_type[1])){
                    const int b_xy= h->mb2b_xy[left_xy[1]] + 3;
                    *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 2*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[2]];
                    *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 3*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[3]];
                }else{
                    *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 2*8]=
                    *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 3*8]= 0;
                }
                *(uint32_t*)h->mvd_cache [list][scan8[5 ]+1]=
                *(uint32_t*)h->mvd_cache [list][scan8[7 ]+1]=
                *(uint32_t*)h->mvd_cache [list][scan8[13]+1]= //FIXME remove past 3 (init somewher else)
                *(uint32_t*)h->mvd_cache [list][scan8[4 ]]=
                *(uint32_t*)h->mvd_cache [list][scan8[12]]= 0;
            }
        }
    //FIXME
    }
#endif
}
694
695 static inline void write_back_intra_pred_mode(H264Context *h){
696 MpegEncContext * const s = &h->s;
697 const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
698
699 h->intra4x4_pred_mode[mb_xy][0]= h->intra4x4_pred_mode_cache[7+8*1];
700 h->intra4x4_pred_mode[mb_xy][1]= h->intra4x4_pred_mode_cache[7+8*2];
701 h->intra4x4_pred_mode[mb_xy][2]= h->intra4x4_pred_mode_cache[7+8*3];
702 h->intra4x4_pred_mode[mb_xy][3]= h->intra4x4_pred_mode_cache[7+8*4];
703 h->intra4x4_pred_mode[mb_xy][4]= h->intra4x4_pred_mode_cache[4+8*4];
704 h->intra4x4_pred_mode[mb_xy][5]= h->intra4x4_pred_mode_cache[5+8*4];
705 h->intra4x4_pred_mode[mb_xy][6]= h->intra4x4_pred_mode_cache[6+8*4];
706 }
707
708 /**
709 * checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks.
710 */
711 static inline int check_intra4x4_pred_mode(H264Context *h){
712 MpegEncContext * const s = &h->s;
713 static const int8_t top [12]= {-1, 0,LEFT_DC_PRED,-1,-1,-1,-1,-1, 0};
714 static const int8_t left[12]= { 0,-1, TOP_DC_PRED, 0,-1,-1,-1, 0,-1,DC_128_PRED};
715 int i;
716
717 if(!(h->top_samples_available&0x8000)){
718 for(i=0; i<4; i++){
719 int status= top[ h->intra4x4_pred_mode_cache[scan8[0] + i] ];
720 if(status<0){
721 av_log(h->s.avctx, AV_LOG_ERROR, "top block unavailable for requested intra4x4 mode %d at %d %d\n", status, s->mb_x, s->mb_y);
722 return -1;
723 } else if(status){
724 h->intra4x4_pred_mode_cache[scan8[0] + i]= status;
725 }
726 }
727 }
728
729 if(!(h->left_samples_available&0x8000)){
730 for(i=0; i<4; i++){
731 int status= left[ h->intra4x4_pred_mode_cache[scan8[0] + 8*i] ];
732 if(status<0){
733 av_log(h->s.avctx, AV_LOG_ERROR, "left block unavailable for requested intra4x4 mode %d at %d %d\n", status, s->mb_x, s->mb_y);
734 return -1;
735 } else if(status){
736 h->intra4x4_pred_mode_cache[scan8[0] + 8*i]= status;
737 }
738 }
739 }
740
741 return 0;
742 } //FIXME cleanup like next
743
744 /**
745 * checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks.
746 */
747 static inline int check_intra_pred_mode(H264Context *h, int mode){
748 MpegEncContext * const s = &h->s;
749 static const int8_t top [7]= {LEFT_DC_PRED8x8, 1,-1,-1};
750 static const int8_t left[7]= { TOP_DC_PRED8x8,-1, 2,-1,DC_128_PRED8x8};
751
752 if(mode < 0 || mode > 6)
753 return -1;
754
755 if(!(h->top_samples_available&0x8000)){
756 mode= top[ mode ];
757 if(mode<0){
758 av_log(h->s.avctx, AV_LOG_ERROR, "top block unavailable for requested intra mode at %d %d\n", s->mb_x, s->mb_y);
759 return -1;
760 }
761 }
762
763 if(!(h->left_samples_available&0x8000)){
764 mode= left[ mode ];
765 if(mode<0){
766 av_log(h->s.avctx, AV_LOG_ERROR, "left block unavailable for requested intra mode at %d %d\n", s->mb_x, s->mb_y);
767 return -1;
768 }
769 }
770
771 return mode;
772 }
773
774 /**
775 * gets the predicted intra4x4 prediction mode.
776 */
777 static inline int pred_intra_mode(H264Context *h, int n){
778 const int index8= scan8[n];
779 const int left= h->intra4x4_pred_mode_cache[index8 - 1];
780 const int top = h->intra4x4_pred_mode_cache[index8 - 8];
781 const int min= FFMIN(left, top);
782
783 tprintf("mode:%d %d min:%d\n", left ,top, min);
784
785 if(min<0) return DC_PRED;
786 else return min;
787 }
788
789 static inline void write_back_non_zero_count(H264Context *h){
790 MpegEncContext * const s = &h->s;
791 const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
792
793 h->non_zero_count[mb_xy][0]= h->non_zero_count_cache[4+8*4];
794 h->non_zero_count[mb_xy][1]= h->non_zero_count_cache[5+8*4];
795 h->non_zero_count[mb_xy][2]= h->non_zero_count_cache[6+8*4];
796 h->non_zero_count[mb_xy][3]= h->non_zero_count_cache[7+8*4];
797 h->non_zero_count[mb_xy][4]= h->non_zero_count_cache[7+8*3];
798 h->non_zero_count[mb_xy][5]= h->non_zero_count_cache[7+8*2];
799 h->non_zero_count[mb_xy][6]= h->non_zero_count_cache[7+8*1];
800
801 h->non_zero_count[mb_xy][7]= h->non_zero_count_cache[1+8*2];
802 h->non_zero_count[mb_xy][8]= h->non_zero_count_cache[2+8*2];
803 h->non_zero_count[mb_xy][9]= h->non_zero_count_cache[2+8*1];
804
805 h->non_zero_count[mb_xy][10]=h->non_zero_count_cache[1+8*5];
806 h->non_zero_count[mb_xy][11]=h->non_zero_count_cache[2+8*5];
807 h->non_zero_count[mb_xy][12]=h->non_zero_count_cache[2+8*4];
808 }
809
810 /**
811 * gets the predicted number of non zero coefficients.
812 * @param n block index
813 */
814 static inline int pred_non_zero_count(H264Context *h, int n){
815 const int index8= scan8[n];
816 const int left= h->non_zero_count_cache[index8 - 1];
817 const int top = h->non_zero_count_cache[index8 - 8];
818 int i= left + top;
819
820 if(i<64) i= (i+1)>>1;
821
822 tprintf("pred_nnz L%X T%X n%d s%d P%X\n", left, top, n, scan8[n], i&31);
823
824 return i&31;
825 }
826
827 static inline int fetch_diagonal_mv(H264Context *h, const int16_t **C, int i, int list, int part_width){
828 const int topright_ref= h->ref_cache[list][ i - 8 + part_width ];
829
830 if(topright_ref != PART_NOT_AVAILABLE){
831 *C= h->mv_cache[list][ i - 8 + part_width ];
832 return topright_ref;
833 }else{
834 tprintf("topright MV not available\n");
835
836 *C= h->mv_cache[list][ i - 8 - 1 ];
837 return h->ref_cache[list][ i - 8 - 1 ];
838 }
839 }
840
/**
 * Gets the predicted MV via median prediction from the left (A), top (B)
 * and diagonal (C) neighbor blocks.
 * @param n the block index
 * @param part_width the width of the partition (4, 8,16) -> (1, 2, 4)
 * @param list the reference picture list (0 or 1)
 * @param ref the reference index of the current partition
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
static inline void pred_motion(H264Context * const h, int n, int part_width, int list, int ref, int * const mx, int * const my){
    const int index8= scan8[n];
    const int top_ref= h->ref_cache[list][ index8 - 8 ];
    const int left_ref= h->ref_cache[list][ index8 - 1 ];
    const int16_t * const A= h->mv_cache[list][ index8 - 1 ];
    const int16_t * const B= h->mv_cache[list][ index8 - 8 ];
    const int16_t * C;
    int diagonal_ref, match_count;

    assert(part_width==1 || part_width==2 || part_width==4);

/* mv_cache
B . . A T T T T
U . . L . . , .
U . . L . . . .
U . . L . . , .
. . . L . . . .
*/

    diagonal_ref= fetch_diagonal_mv(h, &C, index8, list, part_width);
    match_count= (diagonal_ref==ref) + (top_ref==ref) + (left_ref==ref);
    if(match_count > 1){ //most common
        //several neighbors use this ref -> component-wise median of A, B, C
        *mx= mid_pred(A[0], B[0], C[0]);
        *my= mid_pred(A[1], B[1], C[1]);
    }else if(match_count==1){
        //exactly one neighbor uses this ref -> take its MV directly
        if(left_ref==ref){
            *mx= A[0];
            *my= A[1];
        }else if(top_ref==ref){
            *mx= B[0];
            *my= B[1];
        }else{
            *mx= C[0];
            *my= C[1];
        }
    }else{
        //no neighbor matches: use A when it is the only available one,
        //otherwise fall back to the median
        if(top_ref == PART_NOT_AVAILABLE && diagonal_ref == PART_NOT_AVAILABLE && left_ref != PART_NOT_AVAILABLE){
            *mx= A[0];
            *my= A[1];
        }else{
            *mx= mid_pred(A[0], B[0], C[0]);
            *my= mid_pred(A[1], B[1], C[1]);
        }
    }

    tprintf("pred_motion (%2d %2d %2d) (%2d %2d %2d) (%2d %2d %2d) -> (%2d %2d %2d) at %2d %2d %d list %d\n", top_ref, B[0], B[1], diagonal_ref, C[0], C[1], left_ref, A[0], A[1], ref, *mx, *my, h->s.mb_x, h->s.mb_y, n, list);
}
895
/**
 * gets the directionally predicted 16x8 MV: the top (n==0) or left (n!=0)
 * neighbour MV is used directly when it has the same reference, otherwise
 * falls back to the median predictor.
 * @param n the block index (0 = top 16x8 partition, else bottom)
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
static inline void pred_16x8_motion(H264Context * const h, int n, int list, int ref, int * const mx, int * const my){
    if(n==0){
        const int top_ref= h->ref_cache[list][ scan8[0] - 8 ];
        const int16_t * const B= h->mv_cache[list][ scan8[0] - 8 ];

        tprintf("pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d", top_ref, B[0], B[1], h->s.mb_x, h->s.mb_y, n, list);

        if(top_ref == ref){
            *mx= B[0];
            *my= B[1];
            return;
        }
    }else{
        const int left_ref= h->ref_cache[list][ scan8[8] - 1 ];
        const int16_t * const A= h->mv_cache[list][ scan8[8] - 1 ];

        tprintf("pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d", left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list);

        if(left_ref == ref){
            *mx= A[0];
            *my= A[1];
            return;
        }
    }

    //RARE
    pred_motion(h, n, 4, list, ref, mx, my);
}
930
/**
 * gets the directionally predicted 8x16 MV: the left (n==0) or
 * top-right/diagonal (n!=0) neighbour MV is used directly when it has the
 * same reference, otherwise falls back to the median predictor.
 * @param n the block index (0 = left 8x16 partition, else right)
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
static inline void pred_8x16_motion(H264Context * const h, int n, int list, int ref, int * const mx, int * const my){
    if(n==0){
        const int left_ref= h->ref_cache[list][ scan8[0] - 1 ];
        const int16_t * const A=  h->mv_cache[list][ scan8[0] - 1 ];

        tprintf("pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d", left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list);

        if(left_ref == ref){
            *mx= A[0];
            *my= A[1];
            return;
        }
    }else{
        const int16_t * C;
        int diagonal_ref;

        diagonal_ref= fetch_diagonal_mv(h, &C, scan8[4], list, 2);

        tprintf("pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d", diagonal_ref, C[0], C[1], h->s.mb_x, h->s.mb_y, n, list);

        if(diagonal_ref == ref){
            *mx= C[0];
            *my= C[1];
            return;
        }
    }

    //RARE
    pred_motion(h, n, 2, list, ref, mx, my);
}
967
/**
 * Predicts the motion vector of a P-skip macroblock: zero MV when either
 * the top or the left neighbour is unavailable or is a zero-MV block with
 * reference 0, otherwise the regular median prediction.
 */
static inline void pred_pskip_motion(H264Context * const h, int * const mx, int * const my){
    const int top_ref = h->ref_cache[0][ scan8[0] - 8 ];
    const int left_ref= h->ref_cache[0][ scan8[0] - 1 ];

    tprintf("pred_pskip: (%d) (%d) at %2d %2d", top_ref, left_ref, h->s.mb_x, h->s.mb_y);

    // the uint32_t compare tests both 16 bit MV components against 0 at once
    if(top_ref == PART_NOT_AVAILABLE || left_ref == PART_NOT_AVAILABLE
       || (top_ref == 0 && *(uint32_t*)h->mv_cache[0][ scan8[0] - 8 ] == 0)
       || (left_ref == 0 && *(uint32_t*)h->mv_cache[0][ scan8[0] - 1 ] == 0)){

        *mx = *my = 0;
        return;
    }

    pred_motion(h, 0, 4, 0, 0, mx, my);

    return;
}
986
/**
 * Copies the per-macroblock mv/ref/mvd caches back into the picture-wide
 * tables (motion_val, ref_index, mvd_table) so neighbouring macroblocks can
 * use them as prediction context. Two 4x4 MVs (2x int16_t each) are moved
 * per 64 bit store.
 */
static inline void write_back_motion(H264Context *h, int mb_type){
    MpegEncContext * const s = &h->s;
    const int b_xy = 4*s->mb_x + 4*s->mb_y*h->b_stride;
    const int b8_xy= 2*s->mb_x + 2*s->mb_y*h->b8_stride;
    int list;

    for(list=0; list<2; list++){
        int y;
        if((!IS_8X8(mb_type)) && !USES_LIST(mb_type, list)){
            // list unused by this mb: clear MVs and mark refs as not used
            if(1){ //FIXME skip or never read if mb_type doesnt use it
                for(y=0; y<4; y++){
                    *(uint64_t*)s->current_picture.motion_val[list][b_xy + 0 + y*h->b_stride]=
                    *(uint64_t*)s->current_picture.motion_val[list][b_xy + 2 + y*h->b_stride]= 0;
                }
                if( h->pps.cabac ) {
                    /* FIXME needed ? */
                    for(y=0; y<4; y++){
                        *(uint64_t*)h->mvd_table[list][b_xy + 0 + y*h->b_stride]=
                        *(uint64_t*)h->mvd_table[list][b_xy + 2 + y*h->b_stride]= 0;
                    }
                }
                for(y=0; y<2; y++){
                    *(uint16_t*)s->current_picture.ref_index[list][b8_xy + y*h->b8_stride]= (LIST_NOT_USED&0xFF)*0x0101;
                }
            }
            continue; //FIXME direct mode ...
        }

        // copy the cached MVs / mvds / reference indices into the frame tables
        for(y=0; y<4; y++){
            *(uint64_t*)s->current_picture.motion_val[list][b_xy + 0 + y*h->b_stride]= *(uint64_t*)h->mv_cache[list][scan8[0]+0 + 8*y];
            *(uint64_t*)s->current_picture.motion_val[list][b_xy + 2 + y*h->b_stride]= *(uint64_t*)h->mv_cache[list][scan8[0]+2 + 8*y];
        }
        if( h->pps.cabac ) {
            for(y=0; y<4; y++){
                *(uint64_t*)h->mvd_table[list][b_xy + 0 + y*h->b_stride]= *(uint64_t*)h->mvd_cache[list][scan8[0]+0 + 8*y];
                *(uint64_t*)h->mvd_table[list][b_xy + 2 + y*h->b_stride]= *(uint64_t*)h->mvd_cache[list][scan8[0]+2 + 8*y];
            }
        }
        for(y=0; y<2; y++){
            s->current_picture.ref_index[list][b8_xy + 0 + y*h->b8_stride]= h->ref_cache[list][scan8[0]+0 + 16*y];
            s->current_picture.ref_index[list][b8_xy + 1 + y*h->b8_stride]= h->ref_cache[list][scan8[0]+2 + 16*y];
        }
    }
}
1031
1032 /**
1033 * Decodes a network abstraction layer unit.
1034 * @param consumed is the number of bytes used as input
1035 * @param length is the length of the array
1036 * @param dst_length is the number of decoded bytes FIXME here or a decode rbsp ttailing?
1037 * @returns decoded bytes, might be src+1 if no escapes
1038 */
1039 static uint8_t *decode_nal(H264Context *h, uint8_t *src, int *dst_length, int *consumed, int length){
1040 int i, si, di;
1041 uint8_t *dst;
1042
1043 // src[0]&0x80; //forbidden bit
1044 h->nal_ref_idc= src[0]>>5;
1045 h->nal_unit_type= src[0]&0x1F;
1046
1047 src++; length--;
1048 #if 0
1049 for(i=0; i<length; i++)
1050 printf("%2X ", src[i]);
1051 #endif
1052 for(i=0; i+1<length; i+=2){
1053 if(src[i]) continue;
1054 if(i>0 && src[i-1]==0) i--;
1055 if(i+2<length && src[i+1]==0 && src[i+2]<=3){
1056 if(src[i+2]!=3){
1057 /* startcode, so we must be past the end */
1058 length=i;
1059 }
1060 break;
1061 }
1062 }
1063
1064 if(i>=length-1){ //no escaped 0
1065 *dst_length= length;
1066 *consumed= length+1; //+1 for the header
1067 return src;
1068 }
1069
1070 h->rbsp_buffer= av_fast_realloc(h->rbsp_buffer, &h->rbsp_buffer_size, length);
1071 dst= h->rbsp_buffer;
1072
1073 //printf("deoding esc\n");
1074 si=di=0;
1075 while(si<length){
1076 //remove escapes (very rare 1:2^22)
1077 if(si+2<length && src[si]==0 && src[si+1]==0 && src[si+2]<=3){
1078 if(src[si+2]==3){ //escape
1079 dst[di++]= 0;
1080 dst[di++]= 0;
1081 si+=3;
1082 continue;
1083 }else //next start code
1084 break;
1085 }
1086
1087 dst[di++]= src[si++];
1088 }
1089
1090 *dst_length= di;
1091 *consumed= si + 1;//+1 for the header
1092 //FIXME store exact number of bits in the getbitcontext (its needed for decoding)
1093 return dst;
1094 }
1095
#if 0
/* Disabled encoder-side counterparts of decode_nal()/decode_rbsp_trailing(). */
/**
 * Escapes RBSP data (inserts emulation prevention 0x03 bytes) and prepends
 * the 1 byte NAL header.
 * @param src the data which should be escaped
 * @param dst the target buffer, dst+1 == src is allowed as a special case
 * @param length the length of the src data
 * @param dst_length the length of the dst array
 * @returns length of escaped data in bytes or -1 if an error occured
 */
static int encode_nal(H264Context *h, uint8_t *dst, uint8_t *src, int length, int dst_length){
    int i, escape_count, si, di;
    uint8_t *temp;

    assert(length>=0);
    assert(dst_length>0);

    dst[0]= (h->nal_ref_idc<<5) + h->nal_unit_type;

    if(length==0) return 1;

    /* first pass: count how many escape bytes will be needed */
    escape_count= 0;
    for(i=0; i<length; i+=2){
        if(src[i]) continue;
        if(i>0 && src[i-1]==0)
            i--;
        if(i+2<length && src[i+1]==0 && src[i+2]<=3){
            escape_count++;
            i+=2;
        }
    }

    if(escape_count==0){
        if(dst+1 != src)
            memcpy(dst+1, src, length);
        return length + 1;
    }

    if(length + escape_count + 1> dst_length)
        return -1;

    //this should be damn rare (hopefully)

    h->rbsp_buffer= av_fast_realloc(h->rbsp_buffer, &h->rbsp_buffer_size, length + escape_count);
    temp= h->rbsp_buffer;
//printf("encoding esc\n");

    /* second pass: copy, inserting 0x03 after every 00 00 pair followed by <=3 */
    si= 0;
    di= 0;
    while(si < length){
        if(si+2<length && src[si]==0 && src[si+1]==0 && src[si+2]<=3){
            temp[di++]= 0; si++;
            temp[di++]= 0; si++;
            temp[di++]= 3;
            temp[di++]= src[si++];
        }
        else
            temp[di++]= src[si++];
    }
    memcpy(dst+1, temp, length+escape_count);

    assert(di == length+escape_count);

    return di + 1;
}

/**
 * write 1,10,100,1000,... for alignment, yes its exactly inverse to mpeg4
 */
static void encode_rbsp_trailing(PutBitContext *pb){
    int length;
    put_bits(pb, 1, 1);
    length= (-put_bits_count(pb))&7;
    if(length) put_bits(pb, length, 0);
}
#endif
1170
/**
 * identifies the exact end of the bitstream
 * @return the length of the trailing, or 0 if damaged
 */
static int decode_rbsp_trailing(uint8_t *src){
    const int v= *src;
    int pos;

    tprintf("rbsp trailing %X\n", v);

    /* the trailing length is the 1-based position of the lowest set bit;
       a zero byte means the rbsp_stop_one_bit is missing (damaged stream) */
    for(pos=0; pos<8; pos++){
        if(v & (1<<pos))
            return pos+1;
    }
    return 0;
}
1187
/**
 * idct tranforms the 16 dc values and dequantize them.
 * The 4x4 array of luma DC coefficients is stored scattered at positions
 * given by x_offset/y_offset inside the 16x16 coefficient block.
 * @param qp quantization parameter
 */
static void h264_luma_dc_dequant_idct_c(DCTELEM *block, int qp){
    const int qmul= dequant_coeff[qp][0];
#define stride 16
    int i;
    int temp[16]; //FIXME check if this is a good idea
    static const int x_offset[4]={0, 1*stride, 4* stride, 5*stride};
    static const int y_offset[4]={0, 2*stride, 8* stride, 10*stride};

//memset(block, 64, 2*256);
//return;
    // first pass: 4-point Hadamard butterflies along one direction
    for(i=0; i<4; i++){
        const int offset= y_offset[i];
        const int z0= block[offset+stride*0] + block[offset+stride*4];
        const int z1= block[offset+stride*0] - block[offset+stride*4];
        const int z2= block[offset+stride*1] - block[offset+stride*5];
        const int z3= block[offset+stride*1] + block[offset+stride*5];

        temp[4*i+0]= z0+z3;
        temp[4*i+1]= z1+z2;
        temp[4*i+2]= z1-z2;
        temp[4*i+3]= z0-z3;
    }

    // second pass: butterflies along the other direction, then dequantize
    for(i=0; i<4; i++){
        const int offset= x_offset[i];
        const int z0= temp[4*0+i] + temp[4*2+i];
        const int z1= temp[4*0+i] - temp[4*2+i];
        const int z2= temp[4*1+i] - temp[4*3+i];
        const int z3= temp[4*1+i] + temp[4*3+i];

        block[stride*0 +offset]= ((z0 + z3)*qmul + 2)>>2; //FIXME think about merging this into decode_resdual
        block[stride*2 +offset]= ((z1 + z2)*qmul + 2)>>2;
        block[stride*8 +offset]= ((z1 - z2)*qmul + 2)>>2;
        block[stride*10+offset]= ((z0 - z3)*qmul + 2)>>2;
    }
}
1228
#if 0
/**
 * dct tranforms the 16 dc values.
 * Disabled encoder-side forward counterpart of h264_luma_dc_dequant_idct_c;
 * relies on the stride macro defined above.
 * @param qp quantization parameter ??? FIXME
 */
static void h264_luma_dc_dct_c(DCTELEM *block/*, int qp*/){
//    const int qmul= dequant_coeff[qp][0];
    int i;
    int temp[16]; //FIXME check if this is a good idea
    static const int x_offset[4]={0, 1*stride, 4* stride, 5*stride};
    static const int y_offset[4]={0, 2*stride, 8* stride, 10*stride};

    for(i=0; i<4; i++){
        const int offset= y_offset[i];
        const int z0= block[offset+stride*0] + block[offset+stride*4];
        const int z1= block[offset+stride*0] - block[offset+stride*4];
        const int z2= block[offset+stride*1] - block[offset+stride*5];
        const int z3= block[offset+stride*1] + block[offset+stride*5];

        temp[4*i+0]= z0+z3;
        temp[4*i+1]= z1+z2;
        temp[4*i+2]= z1-z2;
        temp[4*i+3]= z0-z3;
    }

    for(i=0; i<4; i++){
        const int offset= x_offset[i];
        const int z0= temp[4*0+i] + temp[4*2+i];
        const int z1= temp[4*0+i] - temp[4*2+i];
        const int z2= temp[4*1+i] - temp[4*3+i];
        const int z3= temp[4*1+i] + temp[4*3+i];

        block[stride*0 +offset]= (z0 + z3)>>1;
        block[stride*2 +offset]= (z1 + z2)>>1;
        block[stride*8 +offset]= (z1 - z2)>>1;
        block[stride*10+offset]= (z0 - z3)>>1;
    }
}
#endif
1268
1269 #undef xStride
1270 #undef stride
1271
1272 static void chroma_dc_dequant_idct_c(DCTELEM *block, int qp){
1273 const int qmul= dequant_coeff[qp][0];
1274 const int stride= 16*2;
1275 const int xStride= 16;
1276 int a,b,c,d,e;
1277
1278 a= block[stride*0 + xStride*0];
1279 b= block[stride*0 + xStride*1];
1280 c= block[stride*1 + xStride*0];
1281 d= block[stride*1 + xStride*1];
1282
1283 e= a-b;
1284 a= a+b;
1285 b= c-d;
1286 c= c+d;
1287
1288 block[stride*0 + xStride*0]= ((a+c)*qmul + 0)>>1;
1289 block[stride*0 + xStride*1]= ((e+b)*qmul + 0)>>1;
1290 block[stride*1 + xStride*0]= ((a-c)*qmul + 0)>>1;
1291 block[stride*1 + xStride*1]= ((e-b)*qmul + 0)>>1;
1292 }
1293
#if 0
/* Disabled encoder-side forward 2x2 Hadamard of the chroma DC values
   (no scaling), inverse of chroma_dc_dequant_idct_c up to dequantization. */
static void chroma_dc_dct_c(DCTELEM *block){
    const int stride= 16*2;
    const int xStride= 16;
    int a,b,c,d,e;

    a= block[stride*0 + xStride*0];
    b= block[stride*0 + xStride*1];
    c= block[stride*1 + xStride*0];
    d= block[stride*1 + xStride*1];

    e= a-b;
    a= a+b;
    b= c-d;
    c= c+d;

    block[stride*0 + xStride*0]= (a+c);
    block[stride*0 + xStride*1]= (e+b);
    block[stride*1 + xStride*0]= (a-c);
    block[stride*1 + xStride*1]= (e-b);
}
#endif
1316
1317 /**
1318 * gets the chroma qp.
1319 */
1320 static inline int get_chroma_qp(H264Context *h, int qscale){
1321
1322 return chroma_qp[clip(qscale + h->pps.chroma_qp_index_offset, 0, 51)];
1323 }
1324
1325
/**
 * Inverse 4x4 integer transform of a residual block; the (clipped) result
 * is added onto the prediction already present in dst.
 * block[0] gets the +32 rounding term; the final >>6 undoes the transform
 * gain and rounding.
 */
static void h264_add_idct_c(uint8_t *dst, DCTELEM *block, int stride){
    int i;
    uint8_t *cm = cropTbl + MAX_NEG_CROP;

    block[0] += 32;

    // horizontal pass, in place
    for(i=0; i<4; i++){
        const int z0=  block[0 + 4*i]     +  block[2 + 4*i];
        const int z1=  block[0 + 4*i]     -  block[2 + 4*i];
        const int z2= (block[1 + 4*i]>>1) -  block[3 + 4*i];
        const int z3=  block[1 + 4*i]     + (block[3 + 4*i]>>1);

        block[0 + 4*i]= z0 + z3;
        block[1 + 4*i]= z1 + z2;
        block[2 + 4*i]= z1 - z2;
        block[3 + 4*i]= z0 - z3;
    }

    // vertical pass, adding the clipped result to the prediction
    for(i=0; i<4; i++){
        const int z0=  block[i + 4*0]     +  block[i + 4*2];
        const int z1=  block[i + 4*0]     -  block[i + 4*2];
        const int z2= (block[i + 4*1]>>1) -  block[i + 4*3];
        const int z3=  block[i + 4*1]     + (block[i + 4*3]>>1);

        dst[i + 0*stride]= cm[ dst[i + 0*stride] + ((z0 + z3) >> 6) ];
        dst[i + 1*stride]= cm[ dst[i + 1*stride] + ((z1 + z2) >> 6) ];
        dst[i + 2*stride]= cm[ dst[i + 2*stride] + ((z1 - z2) >> 6) ];
        dst[i + 3*stride]= cm[ dst[i + 3*stride] + ((z0 - z3) >> 6) ];
    }
}
1359
#if 0
/* Disabled encoder-side forward 4x4 integer transform of the difference
   between two pixel blocks (src1 - src2). */
static void h264_diff_dct_c(DCTELEM *block, uint8_t *src1, uint8_t *src2, int stride){
    int i;
    //FIXME try int temp instead of block

    for(i=0; i<4; i++){
        const int d0= src1[0 + i*stride] - src2[0 + i*stride];
        const int d1= src1[1 + i*stride] - src2[1 + i*stride];
        const int d2= src1[2 + i*stride] - src2[2 + i*stride];
        const int d3= src1[3 + i*stride] - src2[3 + i*stride];
        const int z0= d0 + d3;
        const int z3= d0 - d3;
        const int z1= d1 + d2;
        const int z2= d1 - d2;

        block[0 + 4*i]=   z0 +   z1;
        block[1 + 4*i]= 2*z3 +   z2;
        block[2 + 4*i]=   z0 -   z1;
        block[3 + 4*i]=   z3 - 2*z2;
    }

    for(i=0; i<4; i++){
        const int z0= block[0*4 + i] + block[3*4 + i];
        const int z3= block[0*4 + i] - block[3*4 + i];
        const int z1= block[1*4 + i] + block[2*4 + i];
        const int z2= block[1*4 + i] - block[2*4 + i];

        block[0*4 + i]=   z0 +   z1;
        block[1*4 + i]= 2*z3 +   z2;
        block[2*4 + i]=   z0 -   z1;
        block[3*4 + i]=   z3 - 2*z2;
    }
}
#endif
1394
//FIXME need to check that this doesnt overflow signed 32 bit for low qp, iam not sure, its very close
//FIXME check that gcc inlines this (and optimizes intra & seperate_dc stuff away)
/**
 * Encoder-side quantizer for a 4x4 coefficient block.
 * Coefficients below the dead-zone threshold are set to 0; the bias
 * implements a dead zone of 1/3 (intra) resp. 1/6 (inter) of a step.
 * @param seperate_dc if set, block[0] is quantized with DC-specific
 *        scaling (reduced shift for qscale<=18 to avoid overflow)
 * @return index of the last non-zero coefficient in scan order, -1 if none
 */
static inline int quantize_c(DCTELEM *block, uint8_t *scantable, int qscale, int intra, int seperate_dc){
    int i;
    const int * const quant_table= quant_coeff[qscale];
    const int bias= intra ? (1<<QUANT_SHIFT)/3 : (1<<QUANT_SHIFT)/6;
    const unsigned int threshold1= (1<<QUANT_SHIFT) - bias - 1;
    const unsigned int threshold2= (threshold1<<1);
    int last_non_zero;

    if(seperate_dc){
        if(qscale<=18){
            //avoid overflows
            const int dc_bias= intra ? (1<<(QUANT_SHIFT-2))/3 : (1<<(QUANT_SHIFT-2))/6;
            const unsigned int dc_threshold1= (1<<(QUANT_SHIFT-2)) - dc_bias - 1;
            const unsigned int dc_threshold2= (dc_threshold1<<1);

            // quant_coeff[qscale+18] holds the 4x scaled DC table
            int level= block[0]*quant_coeff[qscale+18][0];
            if(((unsigned)(level+dc_threshold1))>dc_threshold2){
                if(level>0){
                    level= (dc_bias + level)>>(QUANT_SHIFT-2);
                    block[0]= level;
                }else{
                    level= (dc_bias - level)>>(QUANT_SHIFT-2);
                    block[0]= -level;
                }
//                    last_non_zero = i;
            }else{
                block[0]=0;
            }
        }else{
            const int dc_bias= intra ? (1<<(QUANT_SHIFT+1))/3 : (1<<(QUANT_SHIFT+1))/6;
            const unsigned int dc_threshold1= (1<<(QUANT_SHIFT+1)) - dc_bias - 1;
            const unsigned int dc_threshold2= (dc_threshold1<<1);

            int level= block[0]*quant_table[0];
            if(((unsigned)(level+dc_threshold1))>dc_threshold2){
                if(level>0){
                    level= (dc_bias + level)>>(QUANT_SHIFT+1);
                    block[0]= level;
                }else{
                    level= (dc_bias - level)>>(QUANT_SHIFT+1);
                    block[0]= -level;
                }
//                    last_non_zero = i;
            }else{
                block[0]=0;
            }
        }
        last_non_zero= 0;
        i=1;
    }else{
        last_non_zero= -1;
        i=0;
    }

    // AC coefficients (and DC too when seperate_dc is 0), in scan order
    for(; i<16; i++){
        const int j= scantable[i];
        int level= block[j]*quant_table[j];

//        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
//           || bias-level >= (1<<(QMAT_SHIFT - 3))){
        // single unsigned compare tests |level| > threshold1 for both signs
        if(((unsigned)(level+threshold1))>threshold2){
            if(level>0){
                level= (bias + level)>>QUANT_SHIFT;
                block[j]= level;
            }else{
                level= (bias - level)>>QUANT_SHIFT;
                block[j]= -level;
            }
            last_non_zero = i;
        }else{
            block[j]=0;
        }
    }

    return last_non_zero;
}
1473
/* 4x4 vertical intra prediction: replicate the row above into all 4 rows. */
static void pred4x4_vertical_c(uint8_t *src, uint8_t *topright, int stride){
    const uint32_t top= ((uint32_t*)(src-stride))[0];
    int row;

    for(row=0; row<4; row++)
        ((uint32_t*)(src+row*stride))[0]= top;
}
1481
/* 4x4 horizontal intra prediction: replicate each left pixel across its row. */
static void pred4x4_horizontal_c(uint8_t *src, uint8_t *topright, int stride){
    int row;

    for(row=0; row<4; row++)
        ((uint32_t*)(src+row*stride))[0]= src[-1+row*stride]*0x01010101U;
}
1488
/* 4x4 DC intra prediction: fill with the rounded average of the 4 top and
 * 4 left neighbour pixels. */
static void pred4x4_dc_c(uint8_t *src, uint8_t *topright, int stride){
    int i, sum= 4;  /* rounding term */
    uint32_t fill;

    for(i=0; i<4; i++)
        sum += src[i-stride] + src[-1+i*stride];
    fill= (sum>>3)*0x01010101U;

    for(i=0; i<4; i++)
        ((uint32_t*)(src+i*stride))[0]= fill;
}
1498
/* 4x4 DC prediction from the left column only (top row unavailable). */
static void pred4x4_left_dc_c(uint8_t *src, uint8_t *topright, int stride){
    int i, sum= 2;  /* rounding term */
    uint32_t fill;

    for(i=0; i<4; i++)
        sum += src[-1+i*stride];
    fill= (sum>>2)*0x01010101U;

    for(i=0; i<4; i++)
        ((uint32_t*)(src+i*stride))[0]= fill;
}
1507
/* 4x4 DC prediction from the top row only (left column unavailable). */
static void pred4x4_top_dc_c(uint8_t *src, uint8_t *topright, int stride){
    int i, sum= 2;  /* rounding term */
    uint32_t fill;

    for(i=0; i<4; i++)
        sum += src[i-stride];
    fill= (sum>>2)*0x01010101U;

    for(i=0; i<4; i++)
        ((uint32_t*)(src+i*stride))[0]= fill;
}
1516
/* 4x4 DC prediction when no neighbours are available: fill with 128. */
static void pred4x4_128_dc_c(uint8_t *src, uint8_t *topright, int stride){
    int row;

    for(row=0; row<4; row++)
        ((uint32_t*)(src+row*stride))[0]= 128U*0x01010101U;
}
1523
1524
/* Load the 4 reconstructed pixels above-right of a 4x4 block into t4..t7. */
#define LOAD_TOP_RIGHT_EDGE\
    const int t4= topright[0];\
    const int t5= topright[1];\
    const int t6= topright[2];\
    const int t7= topright[3];\

/* Load the 4 reconstructed pixels left of a 4x4 block into l0..l3. */
#define LOAD_LEFT_EDGE\
    const int l0= src[-1+0*stride];\
    const int l1= src[-1+1*stride];\
    const int l2= src[-1+2*stride];\
    const int l3= src[-1+3*stride];\

/* Load the 4 reconstructed pixels above a 4x4 block into t0..t3. */
#define LOAD_TOP_EDGE\
    const int t0= src[ 0-1*stride];\
    const int t1= src[ 1-1*stride];\
    const int t2= src[ 2-1*stride];\
    const int t3= src[ 3-1*stride];\
1542
/* Diagonal down-right 4x4 intra prediction (mode 4): pixels on the same
 * down-right diagonal share a 3-tap filtered value from the left column,
 * top-left corner and top row. */
static void pred4x4_down_right_c(uint8_t *src, uint8_t *topright, int stride){
    const int lt= src[-1-1*stride];
    LOAD_TOP_EDGE
    LOAD_LEFT_EDGE

    src[0+3*stride]=(l3 + 2*l2 + l1 + 2)>>2;
    src[0+2*stride]=
    src[1+3*stride]=(l2 + 2*l1 + l0 + 2)>>2;
    src[0+1*stride]=
    src[1+2*stride]=
    src[2+3*stride]=(l1 + 2*l0 + lt + 2)>>2;
    src[0+0*stride]=
    src[1+1*stride]=
    src[2+2*stride]=
    src[3+3*stride]=(l0 + 2*lt + t0 + 2)>>2;
    src[1+0*stride]=
    src[2+1*stride]=
    src[3+2*stride]=(lt + 2*t0 + t1 + 2)>>2;
    src[2+0*stride]=
    src[3+1*stride]=(t0 + 2*t1 + t2 + 2)>>2;
    src[3+0*stride]=(t1 + 2*t2 + t3 + 2)>>2;
}
1565
/* Diagonal down-left 4x4 intra prediction (mode 3): pixels on the same
 * down-left diagonal share a 3-tap filtered value from the top and
 * top-right rows. */
static void pred4x4_down_left_c(uint8_t *src, uint8_t *topright, int stride){
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE
//    LOAD_LEFT_EDGE

    src[0+0*stride]=(t0 + t2 + 2*t1 + 2)>>2;
    src[1+0*stride]=
    src[0+1*stride]=(t1 + t3 + 2*t2 + 2)>>2;
    src[2+0*stride]=
    src[1+1*stride]=
    src[0+2*stride]=(t2 + t4 + 2*t3 + 2)>>2;
    src[3+0*stride]=
    src[2+1*stride]=
    src[1+2*stride]=
    src[0+3*stride]=(t3 + t5 + 2*t4 + 2)>>2;
    src[3+1*stride]=
    src[2+2*stride]=
    src[1+3*stride]=(t4 + t6 + 2*t5 + 2)>>2;
    src[3+2*stride]=
    src[2+3*stride]=(t5 + t7 + 2*t6 + 2)>>2;
    src[3+3*stride]=(t6 + 3*t7 + 2)>>2;
}
1588
/* Vertical-right 4x4 intra prediction (mode 5): 2-tap averages on even
 * diagonals, 3-tap filtered values on odd ones, extrapolated down-right. */
static void pred4x4_vertical_right_c(uint8_t *src, uint8_t *topright, int stride){
    const int lt= src[-1-1*stride];
    LOAD_TOP_EDGE
    LOAD_LEFT_EDGE
    const __attribute__((unused)) int unu= l3;

    src[0+0*stride]=
    src[1+2*stride]=(lt + t0 + 1)>>1;
    src[1+0*stride]=
    src[2+2*stride]=(t0 + t1 + 1)>>1;
    src[2+0*stride]=
    src[3+2*stride]=(t1 + t2 + 1)>>1;
    src[3+0*stride]=(t2 + t3 + 1)>>1;
    src[0+1*stride]=
    src[1+3*stride]=(l0 + 2*lt + t0 + 2)>>2;
    src[1+1*stride]=
    src[2+3*stride]=(lt + 2*t0 + t1 + 2)>>2;
    src[2+1*stride]=
    src[3+3*stride]=(t0 + 2*t1 + t2 + 2)>>2;
    src[3+1*stride]=(t1 + 2*t2 + t3 + 2)>>2;
    src[0+2*stride]=(lt + 2*l0 + l1 + 2)>>2;
    src[0+3*stride]=(l0 + 2*l1 + l2 + 2)>>2;
}
1612
/* Vertical-left 4x4 intra prediction (mode 7): 2-tap averages on even rows,
 * 3-tap filtered values on odd rows, from the top/top-right pixels. */
static void pred4x4_vertical_left_c(uint8_t *src, uint8_t *topright, int stride){
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE
    const __attribute__((unused)) int unu= t7;

    src[0+0*stride]=(t0 + t1 + 1)>>1;
    src[1+0*stride]=
    src[0+2*stride]=(t1 + t2 + 1)>>1;
    src[2+0*stride]=
    src[1+2*stride]=(t2 + t3 + 1)>>1;
    src[3+0*stride]=
    src[2+2*stride]=(t3 + t4+ 1)>>1;
    src[3+2*stride]=(t4 + t5+ 1)>>1;
    src[0+1*stride]=(t0 + 2*t1 + t2 + 2)>>2;
    src[1+1*stride]=
    src[0+3*stride]=(t1 + 2*t2 + t3 + 2)>>2;
    src[2+1*stride]=
    src[1+3*stride]=(t2 + 2*t3 + t4 + 2)>>2;
    src[3+1*stride]=
    src[2+3*stride]=(t3 + 2*t4 + t5 + 2)>>2;
    src[3+3*stride]=(t4 + 2*t5 + t6 + 2)>>2;
}
1635
/* Horizontal-up 4x4 intra prediction (mode 8): interpolates upward along
 * the left column; positions past the last left pixel replicate l3.
 * Note (l2 + 2*l3 + l3 + 2) is the spec's (l2 + 3*l3 + 2). */
static void pred4x4_horizontal_up_c(uint8_t *src, uint8_t *topright, int stride){
    LOAD_LEFT_EDGE

    src[0+0*stride]=(l0 + l1 + 1)>>1;
    src[1+0*stride]=(l0 + 2*l1 + l2 + 2)>>2;
    src[2+0*stride]=
    src[0+1*stride]=(l1 + l2 + 1)>>1;
    src[3+0*stride]=
    src[1+1*stride]=(l1 + 2*l2 + l3 + 2)>>2;
    src[2+1*stride]=
    src[0+2*stride]=(l2 + l3 + 1)>>1;
    src[3+1*stride]=
    src[1+2*stride]=(l2 + 2*l3 + l3 + 2)>>2;
    src[3+2*stride]=
    src[1+3*stride]=
    src[0+3*stride]=
    src[2+2*stride]=
    src[2+3*stride]=
    src[3+3*stride]=l3;
}
1656
/* Horizontal-down 4x4 intra prediction (mode 6): interpolates along a
 * down-right-leaning direction from the left column, top-left corner and
 * top row. */
static void pred4x4_horizontal_down_c(uint8_t *src, uint8_t *topright, int stride){
    const int lt= src[-1-1*stride];
    LOAD_TOP_EDGE
    LOAD_LEFT_EDGE
    const __attribute__((unused)) int unu= t3;

    src[0+0*stride]=
    src[2+1*stride]=(lt + l0 + 1)>>1;
    src[1+0*stride]=
    src[3+1*stride]=(l0 + 2*lt + t0 + 2)>>2;
    src[2+0*stride]=(lt + 2*t0 + t1 + 2)>>2;
    src[3+0*stride]=(t0 + 2*t1 + t2 + 2)>>2;
    src[0+1*stride]=
    src[2+2*stride]=(l0 + l1 + 1)>>1;
    src[1+1*stride]=
    src[3+2*stride]=(lt + 2*l0 + l1 + 2)>>2;
    src[0+2*stride]=
    src[2+3*stride]=(l1 + l2+ 1)>>1;
    src[1+2*stride]=
    src[3+3*stride]=(l0 + 2*l1 + l2 + 2)>>2;
    src[0+3*stride]=(l2 + l3 + 1)>>1;
    src[1+3*stride]=(l1 + 2*l2 + l3 + 2)>>2;
}
1680
/* 16x16 vertical intra prediction: replicate the 16 pixels above into
 * every row, 4 bytes at a time. */
static void pred16x16_vertical_c(uint8_t *src, int stride){
    int row, w;
    uint32_t top[4];

    for(w=0; w<4; w++)
        top[w]= ((uint32_t*)(src-stride))[w];

    for(row=0; row<16; row++)
        for(w=0; w<4; w++)
            ((uint32_t*)(src+row*stride))[w]= top[w];
}
1695
/* 16x16 horizontal intra prediction: replicate each left pixel across
 * its 16-pixel row. */
static void pred16x16_horizontal_c(uint8_t *src, int stride){
    int row, w;

    for(row=0; row<16; row++){
        const uint32_t fill= src[-1+row*stride]*0x01010101U;
        for(w=0; w<4; w++)
            ((uint32_t*)(src+row*stride))[w]= fill;
    }
}
1706
/* 16x16 DC intra prediction: fill with the rounded average of the 16 left
 * and 16 top neighbour pixels. */
static void pred16x16_dc_c(uint8_t *src, int stride){
    int row, w, sum= 16;  /* rounding term */
    uint32_t fill;

    for(row=0; row<16; row++)
        sum += src[-1+row*stride] + src[row-stride];

    fill= (sum>>5)*0x01010101U;

    for(row=0; row<16; row++)
        for(w=0; w<4; w++)
            ((uint32_t*)(src+row*stride))[w]= fill;
}
1727
/* 16x16 DC prediction from the left column only (top row unavailable). */
static void pred16x16_left_dc_c(uint8_t *src, int stride){
    int row, w, sum= 8;  /* rounding term */
    uint32_t fill;

    for(row=0; row<16; row++)
        sum += src[-1+row*stride];

    fill= (sum>>4)*0x01010101U;

    for(row=0; row<16; row++)
        for(w=0; w<4; w++)
            ((uint32_t*)(src+row*stride))[w]= fill;
}
1744
/* 16x16 DC prediction from the top row only (left column unavailable). */
static void pred16x16_top_dc_c(uint8_t *src, int stride){
    int row, w, sum= 8;  /* rounding term */
    uint32_t fill;

    for(row=0; row<16; row++)
        sum += src[row-stride];

    fill= (sum>>4)*0x01010101U;

    for(row=0; row<16; row++)
        for(w=0; w<4; w++)
            ((uint32_t*)(src+row*stride))[w]= fill;
}
1760
/* 16x16 DC prediction when no neighbours are available: fill with 128. */
static void pred16x16_128_dc_c(uint8_t *src, int stride){
    int row, w;

    for(row=0; row<16; row++)
        for(w=0; w<4; w++)
            ((uint32_t*)(src+row*stride))[w]= 0x01010101U*128U;
}
1771
/**
 * 16x16 plane intra prediction: fits a linear gradient through the top and
 * left border pixels and fills the block with it (clipped to 0..255).
 * @param svq3 if set, uses SVQ3's different H/V scaling (and swaps H/V)
 *        instead of the H.264 rounding.
 */
static inline void pred16x16_plane_compat_c(uint8_t *src, int stride, const int svq3){
  int i, j, k;
  int a;
  uint8_t *cm = cropTbl + MAX_NEG_CROP;
  const uint8_t * const src0 = src+7-stride;
  const uint8_t *src1 = src+8*stride-1;
  const uint8_t *src2 = src1-2*stride;      // == src+6*stride-1;
  int H = src0[1] - src0[-1];
  int V = src1[0] - src2[ 0];
  // weighted sums of border-pixel differences give the gradient estimate
  for(k=2; k<=8; ++k) {
    src1 += stride; src2 -= stride;
    H += k*(src0[k] - src0[-k]);
    V += k*(src1[0] - src2[ 0]);
  }
  if(svq3){
    H = ( 5*(H/4) ) / 16;
    V = ( 5*(V/4) ) / 16;

    /* required for 100% accuracy */
    i = H; H = V; V = i;
  }else{
    H = ( 5*H+32 ) >> 6;
    V = ( 5*V+32 ) >> 6;
  }

  // a: plane value (<<5) at the top-left pixel of the block
  a = 16*(src1[0] + src2[16] + 1) - 7*(V+H);
  for(j=16; j>0; --j) {
    int b = a;
    a += V;
    for(i=-16; i<0; i+=4) {
      src[16+i] = cm[ (b    ) >> 5 ];
      src[17+i] = cm[ (b+  H) >> 5 ];
      src[18+i] = cm[ (b+2*H) >> 5 ];
      src[19+i] = cm[ (b+3*H) >> 5 ];
      b += 4*H;
    }
    src += stride;
  }
}
1811
/* 16x16 plane prediction with standard H.264 rounding (non-SVQ3 variant). */
static void pred16x16_plane_c(uint8_t *src, int stride){
    pred16x16_plane_compat_c(src, stride, 0);
}
1815
/* 8x8 vertical intra prediction: replicate the 8 pixels above into all rows. */
static void pred8x8_vertical_c(uint8_t *src, int stride){
    const uint32_t lo= ((uint32_t*)(src-stride))[0];
    const uint32_t hi= ((uint32_t*)(src-stride))[1];
    int row;

    for(row=0; row<8; row++){
        ((uint32_t*)(src+row*stride))[0]= lo;
        ((uint32_t*)(src+row*stride))[1]= hi;
    }
}
1826
/* 8x8 horizontal intra prediction: replicate each left pixel across its row. */
static void pred8x8_horizontal_c(uint8_t *src, int stride){
    int row;

    for(row=0; row<8; row++){
        const uint32_t fill= src[-1+row*stride]*0x01010101U;
        ((uint32_t*)(src+row*stride))[0]= fill;
        ((uint32_t*)(src+row*stride))[1]= fill;
    }
}
1835
/* 8x8 DC prediction when no neighbours are available: fill with 128. */
static void pred8x8_128_dc_c(uint8_t *src, int stride){
    int row;

    for(row=0; row<8; row++){
        ((uint32_t*)(src+row*stride))[0]=
        ((uint32_t*)(src+row*stride))[1]= 0x01010101U*128U;
    }
}
1848
/* 8x8 DC prediction from the left column only: the upper 4 rows use the
 * average of left pixels 0..3, the lower 4 rows that of pixels 4..7. */
static void pred8x8_left_dc_c(uint8_t *src, int stride){
    int i, sum_hi= 0, sum_lo= 0;
    uint32_t fill_hi, fill_lo;

    for(i=0; i<4; i++){
        sum_hi += src[-1+i*stride];
        sum_lo += src[-1+(i+4)*stride];
    }
    fill_hi= ((sum_hi + 2)>>2)*0x01010101U;
    fill_lo= ((sum_lo + 2)>>2)*0x01010101U;

    for(i=0; i<4; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]= fill_hi;
        ((uint32_t*)(src+(i+4)*stride))[0]=
        ((uint32_t*)(src+(i+4)*stride))[1]= fill_lo;
    }
}
1870
/* 8x8 DC prediction from the top row only: the left 4 columns use the
 * average of top pixels 0..3, the right 4 columns that of pixels 4..7. */
static void pred8x8_top_dc_c(uint8_t *src, int stride){
    int i, sum_l= 0, sum_r= 0;
    uint32_t fill_l, fill_r;

    for(i=0; i<4; i++){
        sum_l += src[i-stride];
        sum_r += src[4+i-stride];
    }
    fill_l= ((sum_l + 2)>>2)*0x01010101U;
    fill_r= ((sum_r + 2)>>2)*0x01010101U;

    for(i=0; i<8; i++){
        ((uint32_t*)(src+i*stride))[0]= fill_l;
        ((uint32_t*)(src+i*stride))[1]= fill_r;
    }
}
1892
1893
/* 8x8 DC intra prediction: each 4x4 quadrant gets its own DC. The top-left
 * quadrant averages its top and left neighbours, top-right/bottom-left use
 * only their own border, bottom-right averages the other two borders. */
static void pred8x8_dc_c(uint8_t *src, int stride){
    int i;
    int tl= 0, tr= 0, bl= 0;
    uint32_t f00, f01, f10, f11;

    for(i=0; i<4; i++){
        tl += src[-1+i*stride] + src[i-stride];
        tr += src[4+i-stride];
        bl += src[-1+(i+4)*stride];
    }
    f00= ((tl + 4)>>3)*0x01010101U;
    f01= ((tr + 2)>>2)*0x01010101U;
    f10= ((bl + 2)>>2)*0x01010101U;
    f11= ((tr + bl + 4)>>3)*0x01010101U;

    for(i=0; i<4; i++){
        ((uint32_t*)(src+i*stride))[0]= f00;
        ((uint32_t*)(src+i*stride))[1]= f01;
        ((uint32_t*)(src+(i+4)*stride))[0]= f10;
        ((uint32_t*)(src+(i+4)*stride))[1]= f11;
    }
}
1918
/**
 * 8x8 plane intra prediction: fits a linear gradient through the top and
 * left border pixels and fills the block with it (clipped to 0..255).
 */
static void pred8x8_plane_c(uint8_t *src, int stride){
  int j, k;
  int a;
  uint8_t *cm = cropTbl + MAX_NEG_CROP;
  const uint8_t * const src0 = src+3-stride;
  const uint8_t *src1 = src+4*stride-1;
  const uint8_t *src2 = src1-2*stride;      // == src+2*stride-1;
  int H = src0[1] - src0[-1];
  int V = src1[0] - src2[ 0];
  // weighted sums of border-pixel differences give the gradient estimate
  for(k=2; k<=4; ++k) {
    src1 += stride; src2 -= stride;
    H += k*(src0[k] - src0[-k]);
    V += k*(src1[0] - src2[ 0]);
  }
  H = ( 17*H+16 ) >> 5;
  V = ( 17*V+16 ) >> 5;

  // a: plane value (<<5) at the top-left pixel of the block
  a = 16*(src1[0] + src2[8]+1) - 3*(V+H);
  for(j=8; j>0; --j) {
    int b = a;
    a += V;
    src[0] = cm[ (b    ) >> 5 ];
    src[1] = cm[ (b+  H) >> 5 ];
    src[2] = cm[ (b+2*H) >> 5 ];
    src[3] = cm[ (b+3*H) >> 5 ];
    src[4] = cm[ (b+4*H) >> 5 ];
    src[5] = cm[ (b+5*H) >> 5 ];
    src[6] = cm[ (b+6*H) >> 5 ];
    src[7] = cm[ (b+7*H) >> 5 ];
    src += stride;
  }
}
1951
/**
 * Motion compensation for one partition from one reference picture.
 * Fetches the luma block with quarter-pel interpolation (qpix_op) and the
 * two chroma blocks with the chroma interpolator (chroma_op); when the
 * motion vector points (partly) outside the reference frame the samples
 * are fetched through the edge emulation buffer instead.
 */
static inline void mc_dir_part(H264Context *h, Picture *pic, int n, int square, int chroma_height, int delta, int list,
                           uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                           int src_x_offset, int src_y_offset,
                           qpel_mc_func *qpix_op, h264_chroma_mc_func chroma_op){
    MpegEncContext * const s = &h->s;
    // luma source position in quarter-pel units (offsets arrive in full pels * 8 / 2)
    const int mx= h->mv_cache[list][ scan8[n] ][0] + src_x_offset*8;
    const int my= h->mv_cache[list][ scan8[n] ][1] + src_y_offset*8;
    // fractional part selects one of the 16 qpel interpolation functions
    const int luma_xy= (mx&3) + ((my&3)<<2);
    uint8_t * src_y = pic->data[0] + (mx>>2) + (my>>2)*s->linesize;
    uint8_t * src_cb= pic->data[1] + (mx>>3) + (my>>3)*s->uvlinesize;
    uint8_t * src_cr= pic->data[2] + (mx>>3) + (my>>3)*s->uvlinesize;
    int extra_width= (s->flags&CODEC_FLAG_EMU_EDGE) ? 0 : 16; //FIXME increase edge?, IMHO not worth it
    int extra_height= extra_width;
    int emu=0;
    const int full_mx= mx>>2;    // integer part of the luma position
    const int full_my= my>>2;

    assert(pic->data[0]);

    // sub-pel interpolation reads up to 3 extra samples around the block
    if(mx&7) extra_width -= 3;
    if(my&7) extra_height -= 3;

    if(   full_mx < 0-extra_width
       || full_my < 0-extra_height
       || full_mx + 16/*FIXME*/ > s->width + extra_width
       || full_my + 16/*FIXME*/ > s->height + extra_height){
        // block (plus the 2-sample filter margin) leaves the padded frame:
        // copy it into the emu buffer with replicated edges first
        ff_emulated_edge_mc(s->edge_emu_buffer, src_y - 2 - 2*s->linesize, s->linesize, 16+5, 16+5/*FIXME*/, full_mx-2, full_my-2, s->width, s->height);
            src_y= s->edge_emu_buffer + 2 + 2*s->linesize;
        emu=1;
    }

    qpix_op[luma_xy](dest_y, src_y, s->linesize); //FIXME try variable height perhaps?
    if(!square){
        // non-square partition: second half at `delta` bytes offset
        qpix_op[luma_xy](dest_y + delta, src_y + delta, s->linesize);
    }

    if(s->flags&CODEC_FLAG_GRAY) return;

    if(emu){
        ff_emulated_edge_mc(s->edge_emu_buffer, src_cb, s->uvlinesize, 9, 9/*FIXME*/, (mx>>3), (my>>3), s->width>>1, s->height>>1);
            src_cb= s->edge_emu_buffer;
    }
    chroma_op(dest_cb, src_cb, s->uvlinesize, chroma_height, mx&7, my&7);

    if(emu){
        ff_emulated_edge_mc(s->edge_emu_buffer, src_cr, s->uvlinesize, 9, 9/*FIXME*/, (mx>>3), (my>>3), s->width>>1, s->height>>1);
            src_cr= s->edge_emu_buffer;
    }
    chroma_op(dest_cr, src_cr, s->uvlinesize, chroma_height, mx&7, my&7);
}
2002
2003 static inline void mc_part(H264Context *h, int n, int square, int chroma_height, int delta,
2004 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
2005 int x_offset, int y_offset,
2006 qpel_mc_func *qpix_put, h264_chroma_mc_func chroma_put,
2007 qpel_mc_func *qpix_avg, h264_chroma_mc_func chroma_avg,
2008 int list0, int list1){
2009 MpegEncContext * const s = &h->s;
2010 qpel_mc_func *qpix_op= qpix_put;
2011 h264_chroma_mc_func chroma_op= chroma_put;
2012
2013 dest_y += 2*x_offset + 2*y_offset*s-> linesize;
2014 dest_cb += x_offset + y_offset*s->uvlinesize;
2015 dest_cr += x_offset + y_offset*s->uvlinesize;
2016 x_offset += 8*s->mb_x;
2017 y_offset += 8*s->mb_y;
2018
2019 if(list0){
2020 Picture *ref= &h->ref_list[0][ h->ref_cache[0][ scan8[n] ] ];
2021 mc_dir_part(h, ref, n, square, chroma_height, delta, 0,
2022 dest_y, dest_cb, dest_cr, x_offset, y_offset,
2023 qpix_op, chroma_op);
2024
2025 qpix_op= qpix_avg;
2026 chroma_op= chroma_avg;
2027 }
2028
2029 if(list1){
2030 Picture *ref= &h->ref_list[1][ h->ref_cache[1][ scan8[n] ] ];
2031 mc_dir_part(h, ref, n, square, chroma_height, delta, 1,
2032 dest_y, dest_cb, dest_cr, x_offset, y_offset,
2033 qpix_op, chroma_op);
2034 }
2035 }
2036
/**
 * Inter prediction for one whole macroblock: dispatches on the macroblock
 * partitioning (16x16, 16x8, 8x16 or 8x8 with sub-partitions) and calls
 * mc_part() for every partition with the function table entries matching
 * its block size and with the per-partition prediction directions.
 */
static void hl_motion(H264Context *h, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                      qpel_mc_func (*qpix_put)[16], h264_chroma_mc_func (*chroma_put),
                      qpel_mc_func (*qpix_avg)[16], h264_chroma_mc_func (*chroma_avg)){
    MpegEncContext * const s = &h->s;
    const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
    const int mb_type= s->current_picture.mb_type[mb_xy];

    assert(IS_INTER(mb_type));

    if(IS_16X16(mb_type)){
        // one square partition covering the whole macroblock
        mc_part(h, 0, 1, 8, 0, dest_y, dest_cb, dest_cr, 0, 0,
                qpix_put[0], chroma_put[0], qpix_avg[0], chroma_avg[0],
                IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1));
    }else if(IS_16X8(mb_type)){
        // two 16x8 halves; delta=8 puts the second luma 8-wide piece next to the first
        mc_part(h, 0, 0, 4, 8, dest_y, dest_cb, dest_cr, 0, 0,
                qpix_put[1], chroma_put[0], qpix_avg[1], chroma_avg[0],
                IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1));
        mc_part(h, 8, 0, 4, 8, dest_y, dest_cb, dest_cr, 0, 4,
                qpix_put[1], chroma_put[0], qpix_avg[1], chroma_avg[0],
                IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1));
    }else if(IS_8X16(mb_type)){
        // two 8x16 halves; delta=8*linesize stacks the second 8x8 piece below
        mc_part(h, 0, 0, 8, 8*s->linesize, dest_y, dest_cb, dest_cr, 0, 0,
                qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
                IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1));
        mc_part(h, 4, 0, 8, 8*s->linesize, dest_y, dest_cb, dest_cr, 4, 0,
                qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
                IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1));
    }else{
        int i;

        assert(IS_8X8(mb_type));

        // four 8x8 partitions, each possibly split further
        for(i=0; i<4; i++){
            const int sub_mb_type= h->sub_mb_type[i];
            const int n= 4*i;
            int x_offset= (i&1)<<2;
            int y_offset= (i&2)<<1;

            if(IS_SUB_8X8(sub_mb_type)){
                mc_part(h, n, 1, 4, 0, dest_y, dest_cb, dest_cr, x_offset, y_offset,
                    qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
                    IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
            }else if(IS_SUB_8X4(sub_mb_type)){
                mc_part(h, n  , 0, 2, 4, dest_y, dest_cb, dest_cr, x_offset, y_offset,
                    qpix_put[2], chroma_put[1], qpix_avg[2], chroma_avg[1],
                    IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
                mc_part(h, n+2, 0, 2, 4, dest_y, dest_cb, dest_cr, x_offset, y_offset+2,
                    qpix_put[2], chroma_put[1], qpix_avg[2], chroma_avg[1],
                    IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
            }else if(IS_SUB_4X8(sub_mb_type)){
                mc_part(h, n  , 0, 4, 4*s->linesize, dest_y, dest_cb, dest_cr, x_offset, y_offset,
                    qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
                    IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
                mc_part(h, n+1, 0, 4, 4*s->linesize, dest_y, dest_cb, dest_cr, x_offset+2, y_offset,
                    qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
                    IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
            }else{
                int j;
                assert(IS_SUB_4X4(sub_mb_type));
                for(j=0; j<4; j++){
                    int sub_x_offset= x_offset + 2*(j&1);
                    int sub_y_offset= y_offset +   (j&2);
                    mc_part(h, n+j, 1, 2, 0, dest_y, dest_cb, dest_cr, sub_x_offset, sub_y_offset,
                        qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
                        IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
                }
            }
        }
    }
}
2107
2108 static void decode_init_vlc(H264Context *h){
2109 static int done = 0;
2110
2111 if (!done) {
2112 int i;
2113 done = 1;
2114
2115 init_vlc(&chroma_dc_coeff_token_vlc, CHROMA_DC_COEFF_TOKEN_VLC_BITS, 4*5,
2116 &chroma_dc_coeff_token_len [0], 1, 1,
2117 &chroma_dc_coeff_token_bits[0], 1, 1);
2118
2119 for(i=0; i<4; i++){
2120 init_vlc(&coeff_token_vlc[i], COEFF_TOKEN_VLC_BITS, 4*17,
2121 &coeff_token_len [i][0], 1, 1,
2122 &coeff_token_bits[i][0], 1, 1);
2123 }
2124
2125 for(i=0; i<3; i++){
2126 init_vlc(&chroma_dc_total_zeros_vlc[i], CHROMA_DC_TOTAL_ZEROS_VLC_BITS, 4,
2127 &chroma_dc_total_zeros_len [i][0], 1, 1,
2128 &chroma_dc_total_zeros_bits[i][0], 1, 1);
2129 }
2130 for(i=0; i<15; i++){
2131 init_vlc(&total_zeros_vlc[i], TOTAL_ZEROS_VLC_BITS, 16,
2132 &total_zeros_len [i][0], 1, 1,
2133 &total_zeros_bits[i][0], 1, 1);
2134 }
2135
2136 for(i=0; i<6; i++){
2137 init_vlc(&run_vlc[i], RUN_VLC_BITS, 7,
2138 &run_len [i][0], 1, 1,
2139 &run_bits[i][0], 1, 1);
2140 }
2141 init_vlc(&run7_vlc, RUN7_VLC_BITS, 16,
2142 &run_len [6][0], 1, 1,
2143 &run_bits[6][0], 1, 1);
2144 }
2145 }
2146
2147 /**
2148 * Sets the intra prediction function pointers.
2149 */
2150 static void init_pred_ptrs(H264Context *h){
2151 // MpegEncContext * const s = &h->s;
2152
2153 h->pred4x4[VERT_PRED ]= pred4x4_vertical_c;
2154 h->pred4x4[HOR_PRED ]= pred4x4_horizontal_c;
2155 h->pred4x4[DC_PRED ]= pred4x4_dc_c;
2156 h->pred4x4[DIAG_DOWN_LEFT_PRED ]= pred4x4_down_left_c;
2157 h->pred4x4[DIAG_DOWN_RIGHT_PRED]= pred4x4_down_right_c;
2158 h->pred4x4[VERT_RIGHT_PRED ]= pred4x4_vertical_right_c;
2159 h->pred4x4[HOR_DOWN_PRED ]= pred4x4_horizontal_down_c;
2160 h->pred4x4[VERT_LEFT_PRED ]= pred4x4_vertical_left_c;
2161 h->pred4x4[HOR_UP_PRED ]= pred4x4_horizontal_up_c;
2162 h->pred4x4[LEFT_DC_PRED ]= pred4x4_left_dc_c;
2163 h->pred4x4[TOP_DC_PRED ]= pred4x4_top_dc_c;
2164 h->pred4x4[DC_128_PRED ]= pred4x4_128_dc_c;
2165
2166 h->pred8x8[DC_PRED8x8 ]= pred8x8_dc_c;
2167 h->pred8x8[VERT_PRED8x8 ]= pred8x8_vertical_c;
2168 h->pred8x8[HOR_PRED8x8 ]= pred8x8_horizontal_c;
2169 h->pred8x8[PLANE_PRED8x8 ]= pred8x8_plane_c;
2170 h->pred8x8[LEFT_DC_PRED8x8]= pred8x8_left_dc_c;
2171 h->pred8x8[TOP_DC_PRED8x8 ]= pred8x8_top_dc_c;
2172 h->pred8x8[DC_128_PRED8x8 ]= pred8x8_128_dc_c;
2173
2174 h->pred16x16[DC_PRED8x8 ]= pred16x16_dc_c;
2175 h->pred16x16[VERT_PRED8x8 ]= pred16x16_vertical_c;
2176 h->pred16x16[HOR_PRED8x8 ]= pred16x16_horizontal_c;
2177 h->pred16x16[PLANE_PRED8x8 ]= pred16x16_plane_c;
2178 h->pred16x16[LEFT_DC_PRED8x8]= pred16x16_left_dc_c;
2179 h->pred16x16[TOP_DC_PRED8x8 ]= pred16x16_top_dc_c;
2180 h->pred16x16[DC_128_PRED8x8 ]= pred16x16_128_dc_c;
2181 }
2182
2183 static void free_tables(H264Context *h){
2184 av_freep(&h->intra4x4_pred_mode);
2185 av_freep(&h->chroma_pred_mode_table);
2186 av_freep(&h->cbp_table);
2187 av_freep(&h->mvd_table[0]);
2188 av_freep(&h->mvd_table[1]);
2189 av_freep(&h->non_zero_count);
2190 av_freep(&h->slice_table_base);
2191 av_freep(&h->top_border);
2192 h->slice_table= NULL;
2193
2194 av_freep(&h->mb2b_xy);
2195 av_freep(&h->mb2b8_xy);
2196 }
2197
/**
 * allocates tables.
 * needs width/height (i.e. s->mb_width/mb_height/mb_stride must be set)
 * @return 0 on success, -1 on allocation failure (all tables freed again)
 */
static int alloc_tables(H264Context *h){
    MpegEncContext * const s = &h->s;
    // NOTE(review): one extra macroblock row, presumably guard space for
    // edge accesses — confirm
    const int big_mb_num= s->mb_stride * (s->mb_height+1);
    int x,y;

    // CHECKED_ALLOCZ zeroes the memory and jumps to fail: on OOM
    CHECKED_ALLOCZ(h->intra4x4_pred_mode, big_mb_num * 8  * sizeof(uint8_t))

    CHECKED_ALLOCZ(h->non_zero_count    , big_mb_num * 16 * sizeof(uint8_t))
    CHECKED_ALLOCZ(h->slice_table_base  , big_mb_num * sizeof(uint8_t))
    CHECKED_ALLOCZ(h->top_border       , s->mb_width * (16+8+8) * sizeof(uint8_t))

    if( h->pps.cabac ) {
        // tables only needed by the CABAC entropy decoder
        CHECKED_ALLOCZ(h->chroma_pred_mode_table, big_mb_num * sizeof(uint8_t))
        CHECKED_ALLOCZ(h->cbp_table, big_mb_num * sizeof(uint16_t))
        CHECKED_ALLOCZ(h->mvd_table[0], 32*big_mb_num * sizeof(uint16_t));
        CHECKED_ALLOCZ(h->mvd_table[1], 32*big_mb_num * sizeof(uint16_t));
    }

    // fill with -1 and offset the working pointer so that the row above /
    // column left of the first macroblock is addressable
    memset(h->slice_table_base, -1, big_mb_num * sizeof(uint8_t));
    h->slice_table= h->slice_table_base + s->mb_stride + 1;

    // macroblock index -> motion vector block (b) / 8x8 block (b8) index maps
    CHECKED_ALLOCZ(h->mb2b_xy  , big_mb_num * sizeof(uint16_t));
    CHECKED_ALLOCZ(h->mb2b8_xy , big_mb_num * sizeof(uint16_t));
    for(y=0; y<s->mb_height; y++){
        for(x=0; x<s->mb_width; x++){
            const int mb_xy= x + y*s->mb_stride;
            const int b_xy = 4*x + 4*y*h->b_stride;
            const int b8_xy= 2*x + 2*y*h->b8_stride;

            h->mb2b_xy [mb_xy]= b_xy;
            h->mb2b8_xy[mb_xy]= b8_xy;
        }
    }

    return 0;
fail:
    free_tables(h);
    return -1;
}
2241
2242 static void common_init(H264Context *h){
2243 MpegEncContext * const s = &h->s;
2244
2245 s->width = s->avctx->width;
2246 s->height = s->avctx->height;
2247 s->codec_id= s->avctx->codec->id;
2248
2249 init_pred_ptrs(h);
2250
2251 s->unrestricted_mv=1;
2252 s->decode=1; //FIXME
2253 }
2254
2255 static int decode_init(AVCodecContext *avctx){
2256 H264Context *h= avctx->priv_data;
2257 MpegEncContext * const s = &h->s;
2258
2259 MPV_decode_defaults(s);
2260
2261 s->avctx = avctx;
2262 common_init(h);
2263
2264 s->out_format = FMT_H264;
2265 s->workaround_bugs= avctx->workaround_bugs;
2266
2267 // set defaults
2268 // s->decode_mb= ff_h263_decode_mb;
2269 s->low_delay= 1;
2270 avctx->pix_fmt= PIX_FMT_YUV420P;
2271
2272 decode_init_vlc(h);
2273
2274 if(avctx->codec_tag != 0x31637661) // avc1
2275 h->is_avc = 0;
2276 else {
2277 if((avctx->extradata_size == 0) || (avctx->extradata == NULL)) {
2278 av_log(avctx, AV_LOG_ERROR, "AVC codec requires avcC data\n");
2279 return -1;
2280 }
2281 h->is_avc = 1;
2282 h->got_avcC = 0;
2283 }
2284
2285 return 0;
2286 }
2287
/**
 * Starts decoding of a new frame: generic MPV and error-resilience frame
 * setup, then precomputes the per-4x4-block pixel offsets for the frame's
 * line sizes.
 */
static void frame_start(H264Context *h){
    MpegEncContext * const s = &h->s;
    int i;

    MPV_frame_start(s, s->avctx);
    ff_er_frame_start(s);
    h->mmco_index=0;    // no memory management control ops for this frame yet

    assert(s->linesize && s->uvlinesize);

    // offsets of the 16 luma 4x4 blocks (and their chroma-resolution
    // counterparts) relative to the macroblock origin, derived from scan8
    for(i=0; i<16; i++){
        h->block_offset[i]= 4*((scan8[i] - scan8[0])&7) + 4*s->linesize*((scan8[i] - scan8[0])>>3);
        h->chroma_subblock_offset[i]= 2*((scan8[i] - scan8[0])&7) + 2*s->uvlinesize*((scan8[i] - scan8[0])>>3);
    }
    // offsets of the 4+4 chroma 4x4 blocks (cb: indices 16..19, cr: 20..23)
    for(i=0; i<4; i++){
        h->block_offset[16+i]=
        h->block_offset[20+i]= 4*((scan8[i] - scan8[0])&7) + 4*s->uvlinesize*((scan8[i] - scan8[0])>>3);
    }

//    s->decode= (s->flags&CODEC_FLAG_PSNR) || !s->encoding || s->current_picture.reference /*|| h->contains_intra*/ || 1;
}
2309
/**
 * Saves the current macroblock's rightmost column and bottom row (luma
 * and, unless CODEC_FLAG_GRAY, chroma) into left_border / top_border
 * before the deblocking filter modifies them; xchg_mb_border() later
 * swaps these unfiltered samples back in for neighbour prediction.
 */
static inline void backup_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize){
    MpegEncContext * const s = &h->s;
    int i;

    // step one line up so that index i*linesize addresses row i-1
    src_y  -= linesize;
    src_cb -= uvlinesize;
    src_cr -= uvlinesize;

    // corner sample taken from the previously saved top border
    h->left_border[0]= h->top_border[s->mb_x][15];
    // rightmost luma column, rows 0..15
    for(i=1; i<17; i++){
        h->left_border[i]= src_y[15+i*  linesize];
    }

    // bottom luma row, stored as two 64 bit words
    *(uint64_t*)(h->top_border[s->mb_x]+0)= *(uint64_t*)(src_y +  16*linesize);
    *(uint64_t*)(h->top_border[s->mb_x]+8)= *(uint64_t*)(src_y +8+16*linesize);

    if(!(s->flags&CODEC_FLAG_GRAY)){
        // same for the two 8x8 chroma planes (offsets 17 / 17+9 in
        // left_border, 16 / 24 in top_border)
        h->left_border[17  ]= h->top_border[s->mb_x][16+7];
        h->left_border[17+9]= h->top_border[s->mb_x][24+7];
        for(i=1; i<9; i++){
            h->left_border[i+17  ]= src_cb[7+i*uvlinesize];
            h->left_border[i+17+9]= src_cr[7+i*uvlinesize];
        }
        *(uint64_t*)(h->top_border[s->mb_x]+16)= *(uint64_t*)(src_cb+8*uvlinesize);
        *(uint64_t*)(h->top_border[s->mb_x]+24)= *(uint64_t*)(src_cr+8*uvlinesize);
    }
}
2337
/**
 * Exchanges (xchg=1) or restores (xchg=0, copy border into frame) the
 * border samples of the current macroblock with the unfiltered copies in
 * left_border / top_border, so intra prediction sees pre-deblocking
 * neighbour pixels. NOTE(review): some spans pass a constant 1 instead of
 * xchg — presumably because they cover top-right samples needed only
 * during prediction; confirm before changing.
 */
static inline void xchg_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int xchg){
    MpegEncContext * const s = &h->s;
    int temp8, i;
    uint64_t temp64;
    int deblock_left = (s->mb_x > 0);   // borders exist only if a neighbour does
    int deblock_top  = (s->mb_y > 0);

    // point at the sample above-left of the macroblock
    src_y  -= linesize   + 1;
    src_cb -= uvlinesize + 1;
    src_cr -= uvlinesize + 1;

// swap a and b when xchg is set, otherwise only copy a into b
#define XCHG(a,b,t,xchg)\
t= a;\
if(xchg)\
a= b;\
b= t;

    if(deblock_left){
        for(i = !deblock_top; i<17; i++){
            XCHG(h->left_border[i     ], src_y [i*   linesize], temp8, xchg);
        }
    }

    if(deblock_top){
        XCHG(*(uint64_t*)(h->top_border[s->mb_x]+0), *(uint64_t*)(src_y +1), temp64, xchg);
        XCHG(*(uint64_t*)(h->top_border[s->mb_x]+8), *(uint64_t*)(src_y +9), temp64, 1);
    }

    if(!(s->flags&CODEC_FLAG_GRAY)){
        if(deblock_left){
            for(i = !deblock_top; i<9; i++){
                XCHG(h->left_border[i+17  ], src_cb[i*uvlinesize], temp8, xchg);
                XCHG(h->left_border[i+17+9], src_cr[i*uvlinesize], temp8, xchg);
            }
        }
        if(deblock_top){
            XCHG(*(uint64_t*)(h->top_border[s->mb_x]+16), *(uint64_t*)(src_cb+1), temp64, 1);
            XCHG(*(uint64_t*)(h->top_border[s->mb_x]+24), *(uint64_t*)(src_cr+1), temp64, 1);
        }
    }
}
2379
2380 static void hl_decode_mb(H264Context *h){
2381 MpegEncContext * const s = &h->s;
2382 const int mb_x= s->mb_x;
2383 const int mb_y= s->mb_y;
2384 const int mb_xy= mb_x + mb_y*s->mb_stride;
2385 const int mb_type= s->current_picture.mb_type[mb_xy];
2386 uint8_t *dest_y, *dest_cb, *dest_cr;
2387 int linesize, uvlinesize /*dct_offset*/;
2388 int i;
2389
2390 if(!s->decode)
2391 return;
2392
2393 if(s->mb_skiped){
2394 }
2395
2396 dest_y = s->current_picture.data[0] + (mb_y * 16* s->linesize ) + mb_x * 16;
2397 dest_cb = s->current_picture.data[1] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
2398 dest_cr = s->current_picture.data[2] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
2399
2400 if (h->mb_field_decoding_flag) {
2401 linesize = s->linesize * 2;
2402 uvlinesize = s->uvlinesize * 2;
2403 if(mb_y&1){ //FIXME move out of this func?
2404 dest_y -= s->linesize*15;
2405 dest_cb-= s->linesize*7;
2406 dest_cr-= s->linesize*7;
2407 }
2408 } else {
2409 linesize = s->linesize;
2410 uvlinesize = s->uvlinesize;
2411 // dct_offset = s->linesize * 16;
2412 }
2413
2414 if(IS_INTRA(mb_type)){
2415 if(h->deblocking_filter)
2416 xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 1);
2417
2418 if(!(s->flags&CODEC_FLAG_GRAY)){
2419 h->pred8x8[ h->chroma_pred_mode ](dest_cb, uvlinesize);
2420 h->pred8x8[ h->chroma_pred_mode ](dest_cr, uvlinesize);
2421 }
2422
2423 if(IS_INTRA4x4(mb_type)){
2424 if(!s->encoding){
2425 for(i=0; i<16; i++){
2426 uint8_t * const ptr= dest_y + h->block_offset[i];
2427 uint8_t *topright= ptr + 4 - linesize;
2428 const int topright_avail= (h->topright_samples_available<<i)&0x8000;
2429 const int dir= h->intra4x4_pred_mode_cache[ scan8[i] ];
2430 int tr;
2431
2432 if(!topright_avail){
2433 tr= ptr[3 - linesize]*0x01010101;
2434 topright= (uint8_t*) &tr;
2435 }else if(i==5 && h->deblocking_filter){
2436 tr= *(uint32_t*)h->top_border[mb_x+1];
2437 topright= (uint8_t*) &tr;
2438 }
2439
2440 h->pred4x4[ dir ](ptr, topright, linesize);
2441 if(h->non_zero_count_cache[ scan8[i] ]){
2442 if(s->codec_id == CODEC_ID_H264)
2443 h264_add_idct_c(ptr, h->mb + i*16, linesize);
2444 else
2445 svq3_add_idct_c(ptr, h->mb + i*16, linesize, s->qscale, 0);
2446 }
2447 }
2448 }
2449 }else{
2450 h->pred16x16[ h->intra16x16_pred_mode ](dest_y , linesize);
2451 if(s->codec_id == CODEC_ID_H264)
2452 h264_luma_dc_dequant_idct_c(h->mb, s->qscale);
2453 else
2454 svq3_luma_dc_dequant_idct_c(h->mb, s->qscale);
2455 }
2456 if(h->deblocking_filter)
2457 xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 0);
2458 }else if(s->codec_id == CODEC_ID_H264){
2459 hl_motion(h, dest_y, dest_cb, dest_cr,
2460 s->dsp.put_h264_qpel_pixels_tab, s->dsp.put_h264_chroma_pixels_tab,
2461 s->dsp.avg_h264_qpel_pixels_tab, s->dsp.avg_h264_chroma_pixels_tab);
2462 }
2463
2464
2465 if(!IS_INTRA4x4(mb_type)){
2466 if(s->codec_id == CODEC_ID_H264){
2467 for(i=0; i<16; i++){
2468 if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){ //FIXME benchmark weird rule, & below
2469 uint8_t * const ptr= dest_y + h->block_offset[i];
2470 h264_add_idct_c(ptr, h->mb + i*16, linesize);
2471 }
2472 }
2473 }else{
2474 for(i=0; i<16; i++){
2475 if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){ //FIXME benchmark weird rule, & below
2476 uint8_t * const ptr= dest_y + h->block_offset[i];
2477 svq3_add_idct_c(ptr, h->mb + i*16, linesize, s->qscale, IS_INTRA(mb_type) ? 1 : 0);
2478 }
2479 }
2480 }
2481 }
2482
2483 if(!(s->flags&CODEC_FLAG_GRAY)){
2484 chroma_dc_dequant_idct_c(h->mb + 16*16, h->chroma_qp);
2485 chroma_dc_dequant_idct_c(h->mb + 16*16+4*16, h->chroma_qp);
2486 if(s->codec_id == CODEC_ID_H264){
2487 for(i=16; i<16+4; i++){
2488 if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){
2489 uint8_t * const ptr= dest_cb + h->block_offset[i];
2490 h264_add_idct_c(ptr, h->mb + i*16, uvlinesize);
2491 }
2492 }
2493 for(i=20; i<20+4; i++){
2494 if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){
2495 uint8_t * const ptr= dest_cr + h->block_offset[i];
2496 h264_add_idct_c(ptr, h->mb + i*16, uvlinesize);
2497 }
2498 }
2499 }else{
2500 for(i=16; i<16+4; i++){
2501 if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){
2502 uint8_t * const ptr= dest_cb + h->block_offset[i];
2503 svq3_add_idct_c(ptr, h->mb + i*16, uvlinesize, chroma_qp[s->qscale + 12] - 12, 2);
2504 }
2505 }
2506 for(i=20; i<20+4; i++){
2507 if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){
2508 uint8_t * const ptr= dest_cr + h->block_offset[i];
2509 svq3_add_idct_c(ptr, h->mb + i*16, uvlinesize, chroma_qp[s->qscale + 12] - 12, 2);
2510 }
2511 }
2512 }
2513 }
2514 if(h->deblocking_filter) {
2515 backup_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize);
2516 filter_mb(h, mb_x, mb_y, dest_y, dest_cb, dest_cr);
2517 }
2518 }
2519
2520 /**
2521 * fills the default_ref_list.
2522 */
2523 static int fill_default_ref_list(H264Context *h){
2524 MpegEncContext * const s = &h->s;
2525 int i;
2526 Picture sorted_short_ref[16];
2527
2528 if(h->slice_type==B_TYPE){
2529 int out_i;
2530 int limit= -1;
2531
2532 for(out_i=0; out_i<h->short_ref_count; out_i++){
2533 int best_i=-1;
2534 int best_poc=-1;
2535
2536 for(i=0; i<h->short_ref_count; i++){
2537 const int poc= h->short_ref[i]->poc;
2538 if(poc > limit && poc < best_poc){
2539 best_poc= poc;
2540 best_i= i;
2541 }
2542 }
2543
2544 assert(best_i != -1);
2545
2546 limit= best_poc;
2547 sorted_short_ref[out_i]= *h->short_ref[best_i];
2548 }
2549 }
2550
2551 if(s->picture_structure == PICT_FRAME){
2552 if(h->slice_type==B_TYPE){
2553 const int current_poc= s->current_picture_ptr->poc;
2554 int list;
2555
2556 for(list=0; list<2; list++){
2557 int index=0;
2558
2559 for(i=0; i<h->short_ref_count && index < h->ref_count[list]; i++){
2560 const int i2= list ? h->short_ref_count - i - 1 : i;
2561 const int poc= sorted_short_ref[i2].poc;
2562
2563 if(sorted_short_ref[i2].reference != 3) continue; //FIXME refernce field shit
2564
2565 if((list==1 && poc > current_poc) || (list==0 && poc < current_poc)){
2566 h->default_ref_list[list][index ]= sorted_short_ref[i2];
2567 h->default_ref_list[list][index++].pic_id= sorted_short_ref[i2].frame_num;
2568 }
2569 }
2570
2571 for(i=0; i<h->long_ref_count && index < h->ref_count[ list ]; i++){
2572 if(h->long_ref[i]->reference != 3) continue;
2573
2574 h->default_ref_list[ list ][index ]= *h->long_ref[i];
2575 h->default_ref_list[ list ][index++].pic_id= i;;
2576 }
2577
2578 if(h->long_ref_count > 1 && h->short_ref_count==0){
2579 Picture temp= h->default_ref_list[1][0];
2580 h->default_ref_list[1][0] = h->default_ref_list[1][1];
2581 h->default_ref_list[1][0] = temp;
2582 }
2583
2584 if(index < h->ref_count[ list ])
2585 memset(&h->default_ref_list[list][index], 0, sizeof(Picture)*(h->ref_count[ list ] - index));
2586 }
2587 }else{
2588 int index=0;
2589 for(i=0; i<h->short_ref_count && index < h->ref_count[0]; i++){
2590 if(h->short_ref[i]->reference != 3) continue; //FIXME refernce field shit
2591 h->default_ref_list[0][index ]= *h->short_ref[i];
2592 h->default_ref_list[0][index++].pic_id= h->short_ref[i]->frame_num;
2593 }
2594 for(i=0; i<h->long_ref_count && index < h->ref_count[0]; i++){
2595 if(h->long_ref[i]->reference != 3) continue;
2596 h->default_ref_list[0][index ]= *h->long_ref[i];
2597 h->default_ref_list[0][index++].pic_id= i;;
2598 }
2599 if(index < h->ref_count[0])
2600 memset(&h->default_ref_list[0][index], 0, sizeof(Picture)*(h->ref_count[0] - index));
2601 }
2602 }else{ //FIELD
2603 if(h->slice_type==B_TYPE){
2604 }else{
2605 //FIXME second field balh
2606 }
2607 }
2608 return 0;
2609 }
2610
2611 static int decode_ref_pic_list_reordering(H264Context *h){
2612 MpegEncContext * const s = &h->s;
2613 int list;
2614
2615 if(h->slice_type==I_TYPE || h->slice_type==SI_TYPE) return 0; //FIXME move beofre func
2616
2617 for(list=0; list<2; list++){
2618 memcpy(h->ref_list[list], h->default_ref_list[list], sizeof(Picture)*h->ref_count[list]);
2619
2620 if(get_bits1(&s->gb)){
2621 int pred= h->curr_pic_num;
2622 int index;
2623
2624 for(index=0; ; index++){
2625 int reordering_of_pic_nums_idc= get_ue_golomb(&s->gb);
2626 int pic_id;
2627 int i;
2628
2629
2630 if(index >= h->ref_count[list]){
2631 av_log(h->s.avctx, AV_LOG_ERROR, "reference count overflow\n");
2632 return -1;
2633 }
2634
2635 if(reordering_of_pic_nums_idc<3){
2636 if(reordering_of_pic_nums_idc<2){
2637 const int abs_diff_pic_num= get_ue_golomb(&s->gb) + 1;
2638
2639 if(abs_diff_pic_num >= h->max_pic_num){
2640 av_log(h->s.avctx, AV_LOG_ERROR, "abs_diff_pic_num overflow\n");
2641 return -1;
2642 }
2643
2644 if(reordering_of_pic_nums_idc == 0) pred-= abs_diff_pic_num;
2645 else pred+= abs_diff_pic_num;
2646 pred &= h->max_pic_num - 1;
2647
2648 for(i= h->ref_count[list]-1; i>=index; i--){
2649 if(h->ref_list[list][i].pic_id == pred && h->ref_list[list][i].long_ref==0)
2650 break;
2651 }
2652 }else{
2653 pic_id= get_ue_golomb(&s->gb); //long_term_pic_idx
2654
2655 for(i= h->ref_count[list]-1; i>=index; i--){
2656 if(h->ref_list[list][i].pic_id == pic_id && h->ref_list[list][i].long_ref==1)
2657 break;
2658 }
2659 }
2660
2661 if(i < index){
2662 av_log(h->s.avctx, AV_LOG_ERROR, "reference picture missing during reorder\n");
2663 memset(&h->ref_list[list][index], 0, sizeof(Picture)); //FIXME
2664 }else if(i > index){
2665 Picture tmp= h->ref_list[list][i];
2666 for(; i>index; i--){
2667 h->ref_list[list][i]= h->ref_list[list][i-1];
2668 }
2669 h->ref_list[list][index]= tmp;
2670 }
2671 }else if(reordering_of_pic_nums_idc==3)
2672 break;
2673 else{
2674 av_log(h->s.avctx, AV_LOG_ERROR, "illegal reordering_of_pic_nums_idc\n");
2675 return -1;
2676 }
2677 }
2678 }
2679
2680 if(h->slice_type!=B_TYPE) break;
2681 }
2682 return 0;