segfault fix
[libav.git] / libavcodec / h264.c
1 /*
2 * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
3 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 *
19 */
20
21 /**
22 * @file h264.c
23 * H.264 / AVC / MPEG4 part10 codec.
24 * @author Michael Niedermayer <michaelni@gmx.at>
25 */
26
27 #include "common.h"
28 #include "dsputil.h"
29 #include "avcodec.h"
30 #include "mpegvideo.h"
31 #include "h264data.h"
32 #include "golomb.h"
33
34 #include "cabac.h"
35
36 #undef NDEBUG
37 #include <assert.h>
38
/* Poison misleading MpegEncContext field names so they cannot be used here. */
#define interlaced_dct interlaced_dct_is_a_bad_name
#define mb_intra mb_intra_isnt_initalized_see_mb_type

/* Block indices of the luma/chroma DC blocks in the scan order used by this decoder. */
#define LUMA_DC_BLOCK_INDEX 25
#define CHROMA_DC_BLOCK_INDEX 26

/* Number of bits read at once for the various CAVLC VLC tables. */
#define CHROMA_DC_COEFF_TOKEN_VLC_BITS 8
#define COEFF_TOKEN_VLC_BITS 8
#define TOTAL_ZEROS_VLC_BITS 9
#define CHROMA_DC_TOTAL_ZEROS_VLC_BITS 3
#define RUN_VLC_BITS 3
#define RUN7_VLC_BITS 6

/* Maximum numbers of parameter sets the bitstream may define. */
#define MAX_SPS_COUNT 32
#define MAX_PPS_COUNT 256

/* Upper bound on memory management control operations per slice header. */
#define MAX_MMCO_COUNT 66
56
/**
 * Sequence parameter set.
 * One entry is parsed per SPS NAL unit and stored in H264Context.sps_buffer;
 * field names follow the bitstream syntax element names.
 */
typedef struct SPS{

    int profile_idc;
    int level_idc;
    int log2_max_frame_num; ///< log2_max_frame_num_minus4 + 4
    int poc_type; ///< pic_order_cnt_type
    int log2_max_poc_lsb; ///< log2_max_pic_order_cnt_lsb_minus4
    int delta_pic_order_always_zero_flag;
    int offset_for_non_ref_pic;
    int offset_for_top_to_bottom_field;
    int poc_cycle_length; ///< num_ref_frames_in_pic_order_cnt_cycle
    int ref_frame_count; ///< num_ref_frames
    int gaps_in_frame_num_allowed_flag;
    int mb_width; ///< frame_width_in_mbs_minus1 + 1
    int mb_height; ///< frame_height_in_mbs_minus1 + 1
    int frame_mbs_only_flag;
    int mb_aff; ///<mb_adaptive_frame_field_flag
    int direct_8x8_inference_flag;
    int crop; ///< frame_cropping_flag
    int crop_left; ///< frame_cropping_rect_left_offset
    int crop_right; ///< frame_cropping_rect_right_offset
    int crop_top; ///< frame_cropping_rect_top_offset
    int crop_bottom; ///< frame_cropping_rect_bottom_offset
    int vui_parameters_present_flag;
    AVRational sar; ///< sample aspect ratio (from the VUI)
    short offset_for_ref_frame[256]; //FIXME dyn aloc?
}SPS;
87
/**
 * Picture parameter set.
 * One entry is parsed per PPS NAL unit and stored in H264Context.pps_buffer;
 * field names follow the bitstream syntax element names.
 */
typedef struct PPS{
    int sps_id; ///< id of the SPS this PPS refers to
    int cabac; ///< entropy_coding_mode_flag
    int pic_order_present; ///< pic_order_present_flag
    int slice_group_count; ///< num_slice_groups_minus1 + 1
    int mb_slice_group_map_type;
    int ref_count[2]; ///< num_ref_idx_l0/1_active_minus1 + 1
    int weighted_pred; ///< weighted_pred_flag
    int weighted_bipred_idc;
    int init_qp; ///< pic_init_qp_minus26 + 26
    int init_qs; ///< pic_init_qs_minus26 + 26
    int chroma_qp_index_offset;
    int deblocking_filter_parameters_present; ///< deblocking_filter_parameters_present_flag
    int constrained_intra_pred; ///< constrained_intra_pred_flag
    int redundant_pic_cnt_present; ///< redundant_pic_cnt_present_flag
}PPS;
107
/**
 * Memory management control operation opcode.
 * Values match the memory_management_control_operation syntax element
 * of the H.264 specification.
 */
typedef enum MMCOOpcode{
    MMCO_END=0,        ///< end of the MMCO list
    MMCO_SHORT2UNUSED, ///< mark a short term reference picture as unused
    MMCO_LONG2UNUSED,  ///< mark a long term reference picture as unused
    MMCO_SHORT2LONG,   ///< turn a short term reference into a long term one
    MMCO_SET_MAX_LONG, ///< set the maximum long term frame index
    MMCO_RESET,        ///< mark all reference pictures as unused
    MMCO_LONG,         ///< make the current picture a long term reference
} MMCOOpcode;
120
/**
 * Memory management control operation.
 * One decoded mmco entry from the dec_ref_pic_marking() part of a slice header.
 */
typedef struct MMCO{
    MMCOOpcode opcode;
    int short_frame_num; ///< frame_num of the short term picture the op applies to
    int long_index;      ///< long term index the op applies to
} MMCO;
129
/**
 * H264Context — the complete decoder state for one H.264 stream.
 * The embedded MpegEncContext is the first member; the *_cache arrays hold
 * per-macroblock neighbor data laid out on an 8-wide scratch grid
 * (see fill_caches()/scan8).
 */
typedef struct H264Context{
    MpegEncContext s;
    int nal_ref_idc;   ///< 0 means the current NAL unit is not used as reference
    int nal_unit_type;
#define NAL_SLICE 1
#define NAL_DPA 2
#define NAL_DPB 3
#define NAL_DPC 4
#define NAL_IDR_SLICE 5
#define NAL_SEI 6
#define NAL_SPS 7
#define NAL_PPS 8
#define NAL_PICTURE_DELIMITER 9
#define NAL_FILTER_DATA 10
    uint8_t *rbsp_buffer;   // scratch buffer for NAL payloads (presumably after emulation-prevention removal — confirm against the NAL parsing code)
    int rbsp_buffer_size;

    int chroma_qp; //QPc

    int prev_mb_skiped; //FIXME remove (IMHO not used)

    //prediction stuff
    int chroma_pred_mode;
    int intra16x16_pred_mode;

    int8_t intra4x4_pred_mode_cache[5*8];
    int8_t (*intra4x4_pred_mode)[8];
    void (*pred4x4 [9+3])(uint8_t *src, uint8_t *topright, int stride);//FIXME move to dsp?
    void (*pred8x8 [4+3])(uint8_t *src, int stride);
    void (*pred16x16[4+3])(uint8_t *src, int stride);
    // bitmasks of available neighbor samples for intra prediction, one bit per 4x4 block
    unsigned int topleft_samples_available;
    unsigned int top_samples_available;
    unsigned int topright_samples_available;
    unsigned int left_samples_available;
    uint8_t (*top_border)[16+2*8];
    uint8_t left_border[17+2*9];

    /**
     * non zero coeff count cache.
     * is 64 if not available.
     */
    uint8_t non_zero_count_cache[6*8];
    uint8_t (*non_zero_count)[16];

    /**
     * Motion vector cache.
     */
    int16_t mv_cache[2][5*8][2];
    int8_t ref_cache[2][5*8];
#define LIST_NOT_USED -1 //FIXME rename?
#define PART_NOT_AVAILABLE -2

    /**
     * is 1 if the specific list MV&references are set to 0,0,-2.
     */
    int mv_cache_clean[2];

    int block_offset[16+8];
    int chroma_subblock_offset[16]; //FIXME remove

    // macroblock index -> 4x4 / 8x8 block index lookup tables
    uint16_t *mb2b_xy; //FIXME are these 4 a good idea?
    uint16_t *mb2b8_xy;
    int b_stride;
    int b8_stride;

    int halfpel_flag;
    int thirdpel_flag;

    int unknown_svq3_flag;
    int next_slice_index;

    SPS sps_buffer[MAX_SPS_COUNT];
    SPS sps; ///< current sps

    PPS pps_buffer[MAX_PPS_COUNT];
    /**
     * current pps
     */
    PPS pps; //FIXME move tp Picture perhaps? (->no) do we need that?

    int slice_num;
    uint8_t *slice_table_base;
    uint8_t *slice_table; ///< slice_table_base + mb_stride + 1
    int slice_type;
    int slice_type_fixed;

    //interlacing specific flags
    int mb_field_decoding_flag;

    int sub_mb_type[4];

    //POC stuff
    int poc_lsb;
    int poc_msb;
    int delta_poc_bottom;
    int delta_poc[2];
    int frame_num;
    int prev_poc_msb; ///< poc_msb of the last reference pic for POC type 0
    int prev_poc_lsb; ///< poc_lsb of the last reference pic for POC type 0
    int frame_num_offset; ///< for POC type 2
    int prev_frame_num_offset; ///< for POC type 2
    int prev_frame_num; ///< frame_num of the last pic for POC type 1/2

    /**
     * frame_num for frames or 2*frame_num for field pics.
     */
    int curr_pic_num;

    /**
     * max_frame_num or 2*max_frame_num for field pics.
     */
    int max_pic_num;

    //Weighted pred stuff
    int luma_log2_weight_denom;
    int chroma_log2_weight_denom;
    int luma_weight[2][16];
    int luma_offset[2][16];
    int chroma_weight[2][16][2];
    int chroma_offset[2][16][2];

    //deblock
    int deblocking_filter; ///< disable_deblocking_filter_idc with 1<->0
    int slice_alpha_c0_offset;
    int slice_beta_offset;

    int redundant_pic_count;

    int direct_spatial_mv_pred;

    /**
     * num_ref_idx_l0/1_active_minus1 + 1
     */
    int ref_count[2];// FIXME split for AFF
    Picture *short_ref[16];
    Picture *long_ref[16];
    Picture default_ref_list[2][32];
    Picture ref_list[2][32]; //FIXME size?
    Picture field_ref_list[2][32]; //FIXME size?

    /**
     * memory management control operations buffer.
     */
    MMCO mmco[MAX_MMCO_COUNT];
    int mmco_index;

    int long_ref_count; ///< number of actual long term references
    int short_ref_count; ///< number of actual short term references

    //data partitioning
    GetBitContext intra_gb;
    GetBitContext inter_gb;
    GetBitContext *intra_gb_ptr;
    GetBitContext *inter_gb_ptr;

    DCTELEM mb[16*24] __align8;

    /**
     * Cabac
     */
    CABACContext cabac;
    uint8_t cabac_state[399];
    int cabac_init_idc;

    /* 0x100 -> non null luma_dc, 0x80/0x40 -> non null chroma_dc (cb/cr), 0x?0 -> chroma_cbp(0,1,2), 0x0? luma_cbp */
    uint16_t *cbp_table;
    /* chroma_pred_mode for i4x4 or i16x16, else 0 */
    uint8_t *chroma_pred_mode_table;
    int last_qscale_diff;
    int16_t (*mvd_table[2])[2]; ///< motion vector differences, for CABAC context derivation
    int16_t mvd_cache[2][5*8][2];

}H264Context;
306
307 static VLC coeff_token_vlc[4];
308 static VLC chroma_dc_coeff_token_vlc;
309
310 static VLC total_zeros_vlc[15];
311 static VLC chroma_dc_total_zeros_vlc[3];
312
313 static VLC run_vlc[6];
314 static VLC run7_vlc;
315
316 static void svq3_luma_dc_dequant_idct_c(DCTELEM *block, int qp);
317 static void svq3_add_idct_c(uint8_t *dst, DCTELEM *block, int stride, int qp, int dc);
318 static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr);
319
/**
 * Packs two 16 bit values into one 32 bit word in native byte order,
 * so that a (x,y) motion vector can be moved with a single 32 bit store.
 */
static inline uint32_t pack16to32(int a, int b){
#ifdef WORDS_BIGENDIAN
    return (b&0xFFFF) | (a<<16);
#else
    return (a&0xFFFF) | (b<<16);
#endif
}
327
/**
 * fill a rectangle.
 * Used to broadcast one value over a w x h region of a cache array; the
 * supported (w,h) pairs are exactly the ones needed by the MV/ref caches.
 * @param h height of the rectangle, should be a constant
 * @param w width of the rectangle, should be a constant
 * @param size the size of val (1 or 4), should be a constant
 */
static inline void fill_rectangle(void *vp, int w, int h, int stride, uint32_t val, int size){ //FIXME ensure this IS inlined
    uint8_t *p= (uint8_t*)vp;
    assert(size==1 || size==4);

    // w and stride are in elements; convert to bytes
    w *= size;
    stride *= size;

    // for size==1 the byte value is replicated across the word via val*0x01...01
    //FIXME check what gcc generates for 64 bit on x86 and possible write a 32 bit ver of it
    if(w==2 && h==2){
        *(uint16_t*)(p + 0)=
        *(uint16_t*)(p + stride)= size==4 ? val : val*0x0101;
    }else if(w==2 && h==4){
        *(uint16_t*)(p + 0*stride)=
        *(uint16_t*)(p + 1*stride)=
        *(uint16_t*)(p + 2*stride)=
        *(uint16_t*)(p + 3*stride)= size==4 ? val : val*0x0101;
    }else if(w==4 && h==1){
        *(uint32_t*)(p + 0*stride)= size==4 ? val : val*0x01010101;
    }else if(w==4 && h==2){
        *(uint32_t*)(p + 0*stride)=
        *(uint32_t*)(p + 1*stride)= size==4 ? val : val*0x01010101;
    }else if(w==4 && h==4){
        *(uint32_t*)(p + 0*stride)=
        *(uint32_t*)(p + 1*stride)=
        *(uint32_t*)(p + 2*stride)=
        *(uint32_t*)(p + 3*stride)= size==4 ? val : val*0x01010101;
    }else if(w==8 && h==1){
        *(uint32_t*)(p + 0)=
        *(uint32_t*)(p + 4)= size==4 ? val : val*0x01010101;
    }else if(w==8 && h==2){
        *(uint32_t*)(p + 0 + 0*stride)=
        *(uint32_t*)(p + 4 + 0*stride)=
        *(uint32_t*)(p + 0 + 1*stride)=
        *(uint32_t*)(p + 4 + 1*stride)= size==4 ? val : val*0x01010101;
    }else if(w==8 && h==4){
        // 64 bit stores: for size==4 the 32 bit val is replicated into both halves
        *(uint64_t*)(p + 0*stride)=
        *(uint64_t*)(p + 1*stride)=
        *(uint64_t*)(p + 2*stride)=
        *(uint64_t*)(p + 3*stride)= size==4 ? val*0x0100000001ULL : val*0x0101010101010101ULL;
    }else if(w==16 && h==2){
        *(uint64_t*)(p + 0+0*stride)=
        *(uint64_t*)(p + 8+0*stride)=
        *(uint64_t*)(p + 0+1*stride)=
        *(uint64_t*)(p + 8+1*stride)= size==4 ? val*0x0100000001ULL : val*0x0101010101010101ULL;
    }else if(w==16 && h==4){
        *(uint64_t*)(p + 0+0*stride)=
        *(uint64_t*)(p + 8+0*stride)=
        *(uint64_t*)(p + 0+1*stride)=
        *(uint64_t*)(p + 8+1*stride)=
        *(uint64_t*)(p + 0+2*stride)=
        *(uint64_t*)(p + 8+2*stride)=
        *(uint64_t*)(p + 0+3*stride)=
        *(uint64_t*)(p + 8+3*stride)= size==4 ? val*0x0100000001ULL : val*0x0101010101010101ULL;
    }else
        assert(0);  // unsupported (w,h) combination
}
390
391 static inline void fill_caches(H264Context *h, int mb_type){
392 MpegEncContext * const s = &h->s;
393 const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
394 int topleft_xy, top_xy, topright_xy, left_xy[2];
395 int topleft_type, top_type, topright_type, left_type[2];
396 int left_block[4];
397 int i;
398
399 //wow what a mess, why didnt they simplify the interlacing&intra stuff, i cant imagine that these complex rules are worth it
400
401 if(h->sps.mb_aff){
402 //FIXME
403 topleft_xy = 0; /* avoid warning */
404 top_xy = 0; /* avoid warning */
405 topright_xy = 0; /* avoid warning */
406 }else{
407 topleft_xy = mb_xy-1 - s->mb_stride;
408 top_xy = mb_xy - s->mb_stride;
409 topright_xy= mb_xy+1 - s->mb_stride;
410 left_xy[0] = mb_xy-1;
411 left_xy[1] = mb_xy-1;
412 left_block[0]= 0;
413 left_block[1]= 1;
414 left_block[2]= 2;
415 left_block[3]= 3;
416 }
417
418 topleft_type = h->slice_table[topleft_xy ] == h->slice_num ? s->current_picture.mb_type[topleft_xy] : 0;
419 top_type = h->slice_table[top_xy ] == h->slice_num ? s->current_picture.mb_type[top_xy] : 0;
420 topright_type= h->slice_table[topright_xy] == h->slice_num ? s->current_picture.mb_type[topright_xy]: 0;
421 left_type[0] = h->slice_table[left_xy[0] ] == h->slice_num ? s->current_picture.mb_type[left_xy[0]] : 0;
422 left_type[1] = h->slice_table[left_xy[1] ] == h->slice_num ? s->current_picture.mb_type[left_xy[1]] : 0;
423
424 if(IS_INTRA(mb_type)){
425 h->topleft_samples_available=
426 h->top_samples_available=
427 h->left_samples_available= 0xFFFF;
428 h->topright_samples_available= 0xEEEA;
429
430 if(!IS_INTRA(top_type) && (top_type==0 || h->pps.constrained_intra_pred)){
431 h->topleft_samples_available= 0xB3FF;
432 h->top_samples_available= 0x33FF;
433 h->topright_samples_available= 0x26EA;
434 }
435 for(i=0; i<2; i++){
436 if(!IS_INTRA(left_type[i]) && (left_type[i]==0 || h->pps.constrained_intra_pred)){
437 h->topleft_samples_available&= 0xDF5F;
438 h->left_samples_available&= 0x5F5F;
439 }
440 }
441
442 if(!IS_INTRA(topleft_type) && (topleft_type==0 || h->pps.constrained_intra_pred))
443 h->topleft_samples_available&= 0x7FFF;
444
445 if(!IS_INTRA(topright_type) && (topright_type==0 || h->pps.constrained_intra_pred))
446 h->topright_samples_available&= 0xFBFF;
447
448 if(IS_INTRA4x4(mb_type)){
449 if(IS_INTRA4x4(top_type)){
450 h->intra4x4_pred_mode_cache[4+8*0]= h->intra4x4_pred_mode[top_xy][4];
451 h->intra4x4_pred_mode_cache[5+8*0]= h->intra4x4_pred_mode[top_xy][5];
452 h->intra4x4_pred_mode_cache[6+8*0]= h->intra4x4_pred_mode[top_xy][6];
453 h->intra4x4_pred_mode_cache[7+8*0]= h->intra4x4_pred_mode[top_xy][3];
454 }else{
455 int pred;
456 if(IS_INTRA16x16(top_type) || (IS_INTER(top_type) && !h->pps.constrained_intra_pred))
457 pred= 2;
458 else{
459 pred= -1;
460 }
461 h->intra4x4_pred_mode_cache[4+8*0]=
462 h->intra4x4_pred_mode_cache[5+8*0]=
463 h->intra4x4_pred_mode_cache[6+8*0]=
464 h->intra4x4_pred_mode_cache[7+8*0]= pred;
465 }
466 for(i=0; i<2; i++){
467 if(IS_INTRA4x4(left_type[i])){
468 h->intra4x4_pred_mode_cache[3+8*1 + 2*8*i]= h->intra4x4_pred_mode[left_xy[i]][left_block[0+2*i]];
469 h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= h->intra4x4_pred_mode[left_xy[i]][left_block[1+2*i]];
470 }else{
471 int pred;
472 if(IS_INTRA16x16(left_type[i]) || (IS_INTER(left_type[i]) && !h->pps.constrained_intra_pred))
473 pred= 2;
474 else{
475 pred= -1;
476 }
477 h->intra4x4_pred_mode_cache[3+8*1 + 2*8*i]=
478 h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= pred;
479 }
480 }
481 }
482 }
483
484
485 /*
486 0 . T T. T T T T
487 1 L . .L . . . .
488 2 L . .L . . . .
489 3 . T TL . . . .
490 4 L . .L . . . .
491 5 L . .. . . . .
492 */
493 //FIXME constraint_intra_pred & partitioning & nnz (lets hope this is just a typo in the spec)
494 if(top_type){
495 h->non_zero_count_cache[4+8*0]= h->non_zero_count[top_xy][0];
496 h->non_zero_count_cache[5+8*0]= h->non_zero_count[top_xy][1];
497 h->non_zero_count_cache[6+8*0]= h->non_zero_count[top_xy][2];
498 h->non_zero_count_cache[7+8*0]= h->non_zero_count[top_xy][3];
499
500 h->non_zero_count_cache[1+8*0]= h->non_zero_count[top_xy][7];
501 h->non_zero_count_cache[2+8*0]= h->non_zero_count[top_xy][8];
502
503 h->non_zero_count_cache[1+8*3]= h->non_zero_count[top_xy][10];
504 h->non_zero_count_cache[2+8*3]= h->non_zero_count[top_xy][11];
505 }else{
506 h->non_zero_count_cache[4+8*0]=
507 h->non_zero_count_cache[5+8*0]=
508 h->non_zero_count_cache[6+8*0]=
509 h->non_zero_count_cache[7+8*0]=
510
511 h->non_zero_count_cache[1+8*0]=
512 h->non_zero_count_cache[2+8*0]=
513
514 h->non_zero_count_cache[1+8*3]=
515 h->non_zero_count_cache[2+8*3]= 64;
516 }
517
518 if(left_type[0]){
519 h->non_zero_count_cache[3+8*1]= h->non_zero_count[left_xy[0]][6];
520 h->non_zero_count_cache[3+8*2]= h->non_zero_count[left_xy[0]][5];
521 h->non_zero_count_cache[0+8*1]= h->non_zero_count[left_xy[0]][9]; //FIXME left_block
522 h->non_zero_count_cache[0+8*4]= h->non_zero_count[left_xy[0]][12];
523 }else{
524 h->non_zero_count_cache[3+8*1]=
525 h->non_zero_count_cache[3+8*2]=
526 h->non_zero_count_cache[0+8*1]=
527 h->non_zero_count_cache[0+8*4]= 64;
528 }
529
530 if(left_type[1]){
531 h->non_zero_count_cache[3+8*3]= h->non_zero_count[left_xy[1]][4];
532 h->non_zero_count_cache[3+8*4]= h->non_zero_count[left_xy[1]][3];
533 h->non_zero_count_cache[0+8*2]= h->non_zero_count[left_xy[1]][8];
534 h->non_zero_count_cache[0+8*5]= h->non_zero_count[left_xy[1]][11];
535 }else{
536 h->non_zero_count_cache[3+8*3]=
537 h->non_zero_count_cache[3+8*4]=
538 h->non_zero_count_cache[0+8*2]=
539 h->non_zero_count_cache[0+8*5]= 64;
540 }
541
542 #if 1
543 if(IS_INTER(mb_type)){
544 int list;
545 for(list=0; list<2; list++){
546 if((!IS_8X8(mb_type)) && !USES_LIST(mb_type, list)){
547 /*if(!h->mv_cache_clean[list]){
548 memset(h->mv_cache [list], 0, 8*5*2*sizeof(int16_t)); //FIXME clean only input? clean at all?
549 memset(h->ref_cache[list], PART_NOT_AVAILABLE, 8*5*sizeof(int8_t));
550 h->mv_cache_clean[list]= 1;
551 }*/
552 continue; //FIXME direct mode ...
553 }
554 h->mv_cache_clean[list]= 0;
555
556 if(IS_INTER(topleft_type)){
557 const int b_xy = h->mb2b_xy[topleft_xy] + 3 + 3*h->b_stride;
558 const int b8_xy= h->mb2b8_xy[topleft_xy] + 1 + h->b8_stride;
559 *(uint32_t*)h->mv_cache[list][scan8[0] - 1 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy];
560 h->ref_cache[list][scan8[0] - 1 - 1*8]= s->current_picture.ref_index[list][b8_xy];
561 }else{
562 *(uint32_t*)h->mv_cache[list][scan8[0] - 1 - 1*8]= 0;
563 h->ref_cache[list][scan8[0] - 1 - 1*8]= topleft_type ? LIST_NOT_USED : PART_NOT_AVAILABLE;
564 }
565
566 if(IS_INTER(top_type)){
567 const int b_xy= h->mb2b_xy[top_xy] + 3*h->b_stride;
568 const int b8_xy= h->mb2b8_xy[top_xy] + h->b8_stride;
569 *(uint32_t*)h->mv_cache[list][scan8[0] + 0 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 0];
570 *(uint32_t*)h->mv_cache[list][scan8[0] + 1 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 1];
571 *(uint32_t*)h->mv_cache[list][scan8[0] + 2 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 2];
572 *(uint32_t*)h->mv_cache[list][scan8[0] + 3 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 3];
573 h->ref_cache[list][scan8[0] + 0 - 1*8]=
574 h->ref_cache[list][scan8[0] + 1 - 1*8]= s->current_picture.ref_index[list][b8_xy + 0];
575 h->ref_cache[list][scan8[0] + 2 - 1*8]=
576 h->ref_cache[list][scan8[0] + 3 - 1*8]= s->current_picture.ref_index[list][b8_xy + 1];
577 }else{
578 *(uint32_t*)h->mv_cache [list][scan8[0] + 0 - 1*8]=
579 *(uint32_t*)h->mv_cache [list][scan8[0] + 1 - 1*8]=
580 *(uint32_t*)h->mv_cache [list][scan8[0] + 2 - 1*8]=
581 *(uint32_t*)h->mv_cache [list][scan8[0] + 3 - 1*8]= 0;
582 *(uint32_t*)&h->ref_cache[list][scan8[0] + 0 - 1*8]= ((top_type ? LIST_NOT_USED : PART_NOT_AVAILABLE)&0xFF)*0x01010101;
583 }
584
585 if(IS_INTER(topright_type)){
586 const int b_xy= h->mb2b_xy[topright_xy] + 3*h->b_stride;
587 const int b8_xy= h->mb2b8_xy[topright_xy] + h->b8_stride;
588 *(uint32_t*)h->mv_cache[list][scan8[0] + 4 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy];
589 h->ref_cache[list][scan8[0] + 4 - 1*8]= s->current_picture.ref_index[list][b8_xy];
590 }else{
591 *(uint32_t*)h->mv_cache [list][scan8[0] + 4 - 1*8]= 0;
592 h->ref_cache[list][scan8[0] + 4 - 1*8]= topright_type ? LIST_NOT_USED : PART_NOT_AVAILABLE;
593 }
594
595 //FIXME unify cleanup or sth
596 if(IS_INTER(left_type[0])){
597 const int b_xy= h->mb2b_xy[left_xy[0]] + 3;
598 const int b8_xy= h->mb2b8_xy[left_xy[0]] + 1;
599 *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 0*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[0]];
600 *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[1]];
601 h->ref_cache[list][scan8[0] - 1 + 0*8]=
602 h->ref_cache[list][scan8[0] - 1 + 1*8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[0]>>1)];
603 }else{
604 *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 0*8]=
605 *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 1*8]= 0;
606 h->ref_cache[list][scan8[0] - 1 + 0*8]=
607 h->ref_cache[list][scan8[0] - 1 + 1*8]= left_type[0] ? LIST_NOT_USED : PART_NOT_AVAILABLE;
608 }
609
610 if(IS_INTER(left_type[1])){
611 const int b_xy= h->mb2b_xy[left_xy[1]] + 3;
612 const int b8_xy= h->mb2b8_xy[left_xy[1]] + 1;
613 *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 2*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[2]];
614 *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 3*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[3]];
615 h->ref_cache[list][scan8[0] - 1 + 2*8]=
616 h->ref_cache[list][scan8[0] - 1 + 3*8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[2]>>1)];
617 }else{
618 *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 2*8]=
619 *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 3*8]= 0;
620 h->ref_cache[list][scan8[0] - 1 + 2*8]=
621 h->ref_cache[list][scan8[0] - 1 + 3*8]= left_type[0] ? LIST_NOT_USED : PART_NOT_AVAILABLE;
622 }
623
624 h->ref_cache[list][scan8[5 ]+1] =
625 h->ref_cache[list][scan8[7 ]+1] =
626 h->ref_cache[list][scan8[13]+1] = //FIXME remove past 3 (init somewher else)
627 h->ref_cache[list][scan8[4 ]] =
628 h->ref_cache[list][scan8[12]] = PART_NOT_AVAILABLE;
629 *(uint32_t*)h->mv_cache [list][scan8[5 ]+1]=
630 *(uint32_t*)h->mv_cache [list][scan8[7 ]+1]=
631 *(uint32_t*)h->mv_cache [list][scan8[13]+1]= //FIXME remove past 3 (init somewher else)
632 *(uint32_t*)h->mv_cache [list][scan8[4 ]]=
633 *(uint32_t*)h->mv_cache [list][scan8[12]]= 0;
634
635 if( h->pps.cabac ) {
636 /* XXX beurk, Load mvd */
637 if(IS_INTER(topleft_type)){
638 const int b_xy = h->mb2b_xy[topleft_xy] + 3 + 3*h->b_stride;
639 *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy];
640 }else{
641 *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 - 1*8]= 0;
642 }
643
644 if(IS_INTER(top_type)){
645 const int b_xy= h->mb2b_xy[top_xy] + 3*h->b_stride;
646 *(uint32_t*)h->mvd_cache[list][scan8[0] + 0 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 0];
647 *(uint32_t*)h->mvd_cache[list][scan8[0] + 1 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 1];
648 *(uint32_t*)h->mvd_cache[list][scan8[0] + 2 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 2];
649 *(uint32_t*)h->mvd_cache[list][scan8[0] + 3 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 3];
650 }else{
651 *(uint32_t*)h->mvd_cache [list][scan8[0] + 0 - 1*8]=
652 *(uint32_t*)h->mvd_cache [list][scan8[0] + 1 - 1*8]=
653 *(uint32_t*)h->mvd_cache [list][scan8[0] + 2 - 1*8]=
654 *(uint32_t*)h->mvd_cache [list][scan8[0] + 3 - 1*8]= 0;
655 }
656 if(IS_INTER(left_type[0])){
657 const int b_xy= h->mb2b_xy[left_xy[0]] + 3;
658 *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 0*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[0]];
659 *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[1]];
660 }else{
661 *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 0*8]=
662 *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 1*8]= 0;
663 }
664 if(IS_INTER(left_type[1])){
665 const int b_xy= h->mb2b_xy[left_xy[1]] + 3;
666 *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 2*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[2]];
667 *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 3*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[3]];
668 }else{
669 *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 2*8]=
670 *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 3*8]= 0;
671 }
672 *(uint32_t*)h->mvd_cache [list][scan8[5 ]+1]=
673 *(uint32_t*)h->mvd_cache [list][scan8[7 ]+1]=
674 *(uint32_t*)h->mvd_cache [list][scan8[13]+1]= //FIXME remove past 3 (init somewher else)
675 *(uint32_t*)h->mvd_cache [list][scan8[4 ]]=
676 *(uint32_t*)h->mvd_cache [list][scan8[12]]= 0;
677 }
678 }
679 //FIXME
680 }
681 #endif
682 }
683
684 static inline void write_back_intra_pred_mode(H264Context *h){
685 MpegEncContext * const s = &h->s;
686 const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
687
688 h->intra4x4_pred_mode[mb_xy][0]= h->intra4x4_pred_mode_cache[7+8*1];
689 h->intra4x4_pred_mode[mb_xy][1]= h->intra4x4_pred_mode_cache[7+8*2];
690 h->intra4x4_pred_mode[mb_xy][2]= h->intra4x4_pred_mode_cache[7+8*3];
691 h->intra4x4_pred_mode[mb_xy][3]= h->intra4x4_pred_mode_cache[7+8*4];
692 h->intra4x4_pred_mode[mb_xy][4]= h->intra4x4_pred_mode_cache[4+8*4];
693 h->intra4x4_pred_mode[mb_xy][5]= h->intra4x4_pred_mode_cache[5+8*4];
694 h->intra4x4_pred_mode[mb_xy][6]= h->intra4x4_pred_mode_cache[6+8*4];
695 }
696
/**
 * checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks.
 * @return 0 on success, -1 if a signalled mode requires a missing neighbor
 */
static inline int check_intra4x4_pred_mode(H264Context *h){
    MpegEncContext * const s = &h->s;
    // indexed by the cached pred mode: <0 = invalid without that neighbor,
    // 0 = keep the mode, >0 = replacement DC variant
    static const int8_t top [12]= {-1, 0,LEFT_DC_PRED,-1,-1,-1,-1,-1, 0};
    static const int8_t left[12]= { 0,-1, TOP_DC_PRED, 0,-1,-1,-1, 0,-1,DC_128_PRED};
    int i;

    if(!(h->top_samples_available&0x8000)){ // top row of neighbor samples missing
        for(i=0; i<4; i++){
            int status= top[ h->intra4x4_pred_mode_cache[scan8[0] + i] ];
            if(status<0){
                av_log(h->s.avctx, AV_LOG_ERROR, "top block unavailable for requested intra4x4 mode %d at %d %d\n", status, s->mb_x, s->mb_y);
                return -1;
            } else if(status){
                h->intra4x4_pred_mode_cache[scan8[0] + i]= status;
            }
        }
    }

    if(!(h->left_samples_available&0x8000)){ // left column of neighbor samples missing
        for(i=0; i<4; i++){
            int status= left[ h->intra4x4_pred_mode_cache[scan8[0] + 8*i] ];
            if(status<0){
                av_log(h->s.avctx, AV_LOG_ERROR, "left block unavailable for requested intra4x4 mode %d at %d %d\n", status, s->mb_x, s->mb_y);
                return -1;
            } else if(status){
                h->intra4x4_pred_mode_cache[scan8[0] + 8*i]= status;
            }
        }
    }

    return 0;
} //FIXME cleanup like next
732
733 /**
734 * checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks.
735 */
736 static inline int check_intra_pred_mode(H264Context *h, int mode){
737 MpegEncContext * const s = &h->s;
738 static const int8_t top [7]= {LEFT_DC_PRED8x8, 1,-1,-1};
739 static const int8_t left[7]= { TOP_DC_PRED8x8,-1, 2,-1,DC_128_PRED8x8};
740
741 if(mode < 0 || mode > 6)
742 return -1;
743
744 if(!(h->top_samples_available&0x8000)){
745 mode= top[ mode ];
746 if(mode<0){
747 av_log(h->s.avctx, AV_LOG_ERROR, "top block unavailable for requested intra mode at %d %d\n", s->mb_x, s->mb_y);
748 return -1;
749 }
750 }
751
752 if(!(h->left_samples_available&0x8000)){
753 mode= left[ mode ];
754 if(mode<0){
755 av_log(h->s.avctx, AV_LOG_ERROR, "left block unavailable for requested intra mode at %d %d\n", s->mb_x, s->mb_y);
756 return -1;
757 }
758 }
759
760 return mode;
761 }
762
763 /**
764 * gets the predicted intra4x4 prediction mode.
765 */
766 static inline int pred_intra_mode(H264Context *h, int n){
767 const int index8= scan8[n];
768 const int left= h->intra4x4_pred_mode_cache[index8 - 1];
769 const int top = h->intra4x4_pred_mode_cache[index8 - 8];
770 const int min= FFMIN(left, top);
771
772 tprintf("mode:%d %d min:%d\n", left ,top, min);
773
774 if(min<0) return DC_PRED;
775 else return min;
776 }
777
778 static inline void write_back_non_zero_count(H264Context *h){
779 MpegEncContext * const s = &h->s;
780 const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
781
782 h->non_zero_count[mb_xy][0]= h->non_zero_count_cache[4+8*4];
783 h->non_zero_count[mb_xy][1]= h->non_zero_count_cache[5+8*4];
784 h->non_zero_count[mb_xy][2]= h->non_zero_count_cache[6+8*4];
785 h->non_zero_count[mb_xy][3]= h->non_zero_count_cache[7+8*4];
786 h->non_zero_count[mb_xy][4]= h->non_zero_count_cache[7+8*3];
787 h->non_zero_count[mb_xy][5]= h->non_zero_count_cache[7+8*2];
788 h->non_zero_count[mb_xy][6]= h->non_zero_count_cache[7+8*1];
789
790 h->non_zero_count[mb_xy][7]= h->non_zero_count_cache[1+8*2];
791 h->non_zero_count[mb_xy][8]= h->non_zero_count_cache[2+8*2];
792 h->non_zero_count[mb_xy][9]= h->non_zero_count_cache[2+8*1];
793
794 h->non_zero_count[mb_xy][10]=h->non_zero_count_cache[1+8*5];
795 h->non_zero_count[mb_xy][11]=h->non_zero_count_cache[2+8*5];
796 h->non_zero_count[mb_xy][12]=h->non_zero_count_cache[2+8*4];
797 }
798
799 /**
800 * gets the predicted number of non zero coefficients.
801 * @param n block index
802 */
803 static inline int pred_non_zero_count(H264Context *h, int n){
804 const int index8= scan8[n];
805 const int left= h->non_zero_count_cache[index8 - 1];
806 const int top = h->non_zero_count_cache[index8 - 8];
807 int i= left + top;
808
809 if(i<64) i= (i+1)>>1;
810
811 tprintf("pred_nnz L%X T%X n%d s%d P%X\n", left, top, n, scan8[n], i&31);
812
813 return i&31;
814 }
815
816 static inline int fetch_diagonal_mv(H264Context *h, const int16_t **C, int i, int list, int part_width){
817 const int topright_ref= h->ref_cache[list][ i - 8 + part_width ];
818
819 if(topright_ref != PART_NOT_AVAILABLE){
820 *C= h->mv_cache[list][ i - 8 + part_width ];
821 return topright_ref;
822 }else{
823 tprintf("topright MV not available\n");
824
825 *C= h->mv_cache[list][ i - 8 - 1 ];
826 return h->ref_cache[list][ i - 8 - 1 ];
827 }
828 }
829
/**
 * gets the predicted MV (median motion vector prediction).
 * @param n the block index
 * @param part_width the width of the partition (4, 8,16) -> (1, 2, 4)
 * @param list the reference picture list (0 or 1)
 * @param ref the reference index of the current partition
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
static inline void pred_motion(H264Context * const h, int n, int part_width, int list, int ref, int * const mx, int * const my){
    const int index8= scan8[n];
    const int top_ref= h->ref_cache[list][ index8 - 8 ];
    const int left_ref= h->ref_cache[list][ index8 - 1 ];
    const int16_t * const A= h->mv_cache[list][ index8 - 1 ]; // left neighbor
    const int16_t * const B= h->mv_cache[list][ index8 - 8 ]; // top neighbor
    const int16_t * C;                                        // top-right (or top-left) neighbor
    int diagonal_ref, match_count;

    assert(part_width==1 || part_width==2 || part_width==4);

/* mv_cache
  B . . A T T T T
  U . . L . . , .
  U . . L . . . .
  U . . L . . , .
  . . . L . . . .
*/

    diagonal_ref= fetch_diagonal_mv(h, &C, index8, list, part_width);
    // count how many neighbors use the same reference as the current partition
    match_count= (diagonal_ref==ref) + (top_ref==ref) + (left_ref==ref);
    if(match_count > 1){ //most common
        *mx= mid_pred(A[0], B[0], C[0]);
        *my= mid_pred(A[1], B[1], C[1]);
    }else if(match_count==1){
        // exactly one neighbor matches: take its MV directly
        if(left_ref==ref){
            *mx= A[0];
            *my= A[1];
        }else if(top_ref==ref){
            *mx= B[0];
            *my= B[1];
        }else{
            *mx= C[0];
            *my= C[1];
        }
    }else{
        // no match: use the left MV if it is the only available neighbor,
        // otherwise the component-wise median
        if(top_ref == PART_NOT_AVAILABLE && diagonal_ref == PART_NOT_AVAILABLE && left_ref != PART_NOT_AVAILABLE){
            *mx= A[0];
            *my= A[1];
        }else{
            *mx= mid_pred(A[0], B[0], C[0]);
            *my= mid_pred(A[1], B[1], C[1]);
        }
    }

    tprintf("pred_motion (%2d %2d %2d) (%2d %2d %2d) (%2d %2d %2d) -> (%2d %2d %2d) at %2d %2d %d list %d\n", top_ref, B[0], B[1], diagonal_ref, C[0], C[1], left_ref, A[0], A[1], ref, *mx, *my, h->s.mb_x, h->s.mb_y, n, list);
}
884
885 /**
886 * gets the directionally predicted 16x8 MV.
887 * @param n the block index
888 * @param mx the x component of the predicted motion vector
889 * @param my the y component of the predicted motion vector
890 */
891 static inline void pred_16x8_motion(H264Context * const h, int n, int list, int ref, int * const mx, int * const my){
892 if(n==0){
893 const int top_ref= h->ref_cache[list][ scan8[0] - 8 ];
894 const int16_t * const B= h->mv_cache[list][ scan8[0] - 8 ];
895
896 tprintf("pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d", top_ref, B[0], B[1], h->s.mb_x, h->s.mb_y, n, list);
897
898 if(top_ref == ref){
899 *mx= B[0];
900 *my= B[1];
901 return;
902 }
903 }else{
904 const int left_ref= h->ref_cache[list][ scan8[8] - 1 ];
905 const int16_t * const A= h->mv_cache[list][ scan8[8] - 1 ];
906
907 tprintf("pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d", left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list);
908
909 if(left_ref == ref){
910 *mx= A[0];
911 *my= A[1];
912 return;
913 }
914 }
915
916 //RARE
917 pred_motion(h, n, 4, list, ref, mx, my);
918 }
919
920 /**
921 * gets the directionally predicted 8x16 MV.
922 * @param n the block index
923 * @param mx the x component of the predicted motion vector
924 * @param my the y component of the predicted motion vector
925 */
926 static inline void pred_8x16_motion(H264Context * const h, int n, int list, int ref, int * const mx, int * const my){
927 if(n==0){
928 const int left_ref= h->ref_cache[list][ scan8[0] - 1 ];
929 const int16_t * const A= h->mv_cache[list][ scan8[0] - 1 ];
930
931 tprintf("pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d", left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list);
932
933 if(left_ref == ref){
934 *mx= A[0];
935 *my= A[1];
936 return;
937 }
938 }else{
939 const int16_t * C;
940 int diagonal_ref;
941
942 diagonal_ref= fetch_diagonal_mv(h, &C, scan8[4], list, 2);
943
944 tprintf("pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d", diagonal_ref, C[0], C[1], h->s.mb_x, h->s.mb_y, n, list);
945
946 if(diagonal_ref == ref){
947 *mx= C[0];
948 *my= C[1];
949 return;
950 }
951 }
952
953 //RARE
954 pred_motion(h, n, 2, list, ref, mx, my);
955 }
956
957 static inline void pred_pskip_motion(H264Context * const h, int * const mx, int * const my){
958 const int top_ref = h->ref_cache[0][ scan8[0] - 8 ];
959 const int left_ref= h->ref_cache[0][ scan8[0] - 1 ];
960
961 tprintf("pred_pskip: (%d) (%d) at %2d %2d", top_ref, left_ref, h->s.mb_x, h->s.mb_y);
962
963 if(top_ref == PART_NOT_AVAILABLE || left_ref == PART_NOT_AVAILABLE
964 || (top_ref == 0 && *(uint32_t*)h->mv_cache[0][ scan8[0] - 8 ] == 0)
965 || (left_ref == 0 && *(uint32_t*)h->mv_cache[0][ scan8[0] - 1 ] == 0)){
966
967 *mx = *my = 0;
968 return;
969 }
970
971 pred_motion(h, 0, 4, 0, 0, mx, my);
972
973 return;
974 }
975
/* Copies the per-macroblock MV/ref caches back into the frame-wide
 * motion_val / ref_index / mvd tables for both prediction lists. */
static inline void write_back_motion(H264Context *h, int mb_type){
    MpegEncContext * const s = &h->s;
    const int b_xy = 4*s->mb_x + 4*s->mb_y*h->b_stride;   /* top-left 4x4 block of this MB */
    const int b8_xy= 2*s->mb_x + 2*s->mb_y*h->b8_stride;  /* top-left 8x8 block of this MB */
    int list;

    for(list=0; list<2; list++){
        int y;
        if((!IS_8X8(mb_type)) && !USES_LIST(mb_type, list)){
            /* list unused by this MB: clear MVs (two 4x1 MV pairs per row via
             * one 64-bit store each) and mark the references as unused */
            if(1){ //FIXME skip or never read if mb_type doesnt use it
                for(y=0; y<4; y++){
                    *(uint64_t*)s->current_picture.motion_val[list][b_xy + 0 + y*h->b_stride]=
                    *(uint64_t*)s->current_picture.motion_val[list][b_xy + 2 + y*h->b_stride]= 0;
                }
                if( h->pps.cabac ) {
                    /* FIXME needed ? */
                    for(y=0; y<4; y++){
                        *(uint64_t*)h->mvd_table[list][b_xy + 0 + y*h->b_stride]=
                        *(uint64_t*)h->mvd_table[list][b_xy + 2 + y*h->b_stride]= 0;
                    }
                }
                for(y=0; y<2; y++){
                    /* NOTE(review): this stores the LIST_NOT_USED marker through
                     * motion_val while the used-list path below writes ref_index —
                     * verify against upstream that this target is intended */
                    *(uint16_t*)s->current_picture.motion_val[list][b8_xy + y*h->b8_stride]= (LIST_NOT_USED&0xFF)*0x0101;
                }
            }
            continue; //FIXME direct mode ...
        }

        /* copy the 4x4 MV cache rows (two 64-bit stores per row) */
        for(y=0; y<4; y++){
            *(uint64_t*)s->current_picture.motion_val[list][b_xy + 0 + y*h->b_stride]= *(uint64_t*)h->mv_cache[list][scan8[0]+0 + 8*y];
            *(uint64_t*)s->current_picture.motion_val[list][b_xy + 2 + y*h->b_stride]= *(uint64_t*)h->mv_cache[list][scan8[0]+2 + 8*y];
        }
        if( h->pps.cabac ) {
            /* MV differences are only needed for CABAC context modelling */
            for(y=0; y<4; y++){
                *(uint64_t*)h->mvd_table[list][b_xy + 0 + y*h->b_stride]= *(uint64_t*)h->mvd_cache[list][scan8[0]+0 + 8*y];
                *(uint64_t*)h->mvd_table[list][b_xy + 2 + y*h->b_stride]= *(uint64_t*)h->mvd_cache[list][scan8[0]+2 + 8*y];
            }
        }
        /* reference indices are stored at 8x8 granularity */
        for(y=0; y<2; y++){
            s->current_picture.ref_index[list][b8_xy + 0 + y*h->b8_stride]= h->ref_cache[list][scan8[0]+0 + 16*y];
            s->current_picture.ref_index[list][b8_xy + 1 + y*h->b8_stride]= h->ref_cache[list][scan8[0]+2 + 16*y];
        }
    }
}
1020
1021 /**
1022 * Decodes a network abstraction layer unit.
1023 * @param consumed is the number of bytes used as input
1024 * @param length is the length of the array
1025 * @param dst_length is the number of decoded bytes FIXME here or a decode rbsp ttailing?
1026 * @returns decoded bytes, might be src+1 if no escapes
1027 */
1028 static uint8_t *decode_nal(H264Context *h, uint8_t *src, int *dst_length, int *consumed, int length){
1029 int i, si, di;
1030 uint8_t *dst;
1031
1032 // src[0]&0x80; //forbidden bit
1033 h->nal_ref_idc= src[0]>>5;
1034 h->nal_unit_type= src[0]&0x1F;
1035
1036 src++; length--;
1037 #if 0
1038 for(i=0; i<length; i++)
1039 printf("%2X ", src[i]);
1040 #endif
1041 for(i=0; i+1<length; i+=2){
1042 if(src[i]) continue;
1043 if(i>0 && src[i-1]==0) i--;
1044 if(i+2<length && src[i+1]==0 && src[i+2]<=3){
1045 if(src[i+2]!=3){
1046 /* startcode, so we must be past the end */
1047 length=i;
1048 }
1049 break;
1050 }
1051 }
1052
1053 if(i>=length-1){ //no escaped 0
1054 *dst_length= length;
1055 *consumed= length+1; //+1 for the header
1056 return src;
1057 }
1058
1059 h->rbsp_buffer= av_fast_realloc(h->rbsp_buffer, &h->rbsp_buffer_size, length);
1060 dst= h->rbsp_buffer;
1061
1062 //printf("deoding esc\n");
1063 si=di=0;
1064 while(si<length){
1065 //remove escapes (very rare 1:2^22)
1066 if(si+2<length && src[si]==0 && src[si+1]==0 && src[si+2]<=3){
1067 if(src[si+2]==3){ //escape
1068 dst[di++]= 0;
1069 dst[di++]= 0;
1070 si+=3;
1071 continue;
1072 }else //next start code
1073 break;
1074 }
1075
1076 dst[di++]= src[si++];
1077 }
1078
1079 *dst_length= di;
1080 *consumed= si + 1;//+1 for the header
1081 //FIXME store exact number of bits in the getbitcontext (its needed for decoding)
1082 return dst;
1083 }
1084
/* Encoder-side NAL escaping helpers; currently compiled out. */
#if 0
/**
 * @param src the data which should be escaped
 * @param dst the target buffer, dst+1 == src is allowed as a special case
 * @param length the length of the src data
 * @param dst_length the length of the dst array
 * @returns length of escaped data in bytes or -1 if an error occured
 */
static int encode_nal(H264Context *h, uint8_t *dst, uint8_t *src, int length, int dst_length){
    int i, escape_count, si, di;
    uint8_t *temp;

    assert(length>=0);
    assert(dst_length>0);

    dst[0]= (h->nal_ref_idc<<5) + h->nal_unit_type;

    if(length==0) return 1;

    /* first pass: count 00 00 0x (x<=3) sequences needing an escape byte */
    escape_count= 0;
    for(i=0; i<length; i+=2){
        if(src[i]) continue;
        if(i>0 && src[i-1]==0)
            i--;
        if(i+2<length && src[i+1]==0 && src[i+2]<=3){
            escape_count++;
            i+=2;
        }
    }

    if(escape_count==0){
        if(dst+1 != src)
            memcpy(dst+1, src, length);
        return length + 1;
    }

    if(length + escape_count + 1> dst_length)
        return -1;

    //this should be damn rare (hopefully)

    h->rbsp_buffer= av_fast_realloc(h->rbsp_buffer, &h->rbsp_buffer_size, length + escape_count);
    temp= h->rbsp_buffer;
//printf("encoding esc\n");

    /* second pass: copy, inserting the 03 emulation-prevention byte */
    si= 0;
    di= 0;
    while(si < length){
        if(si+2<length && src[si]==0 && src[si+1]==0 && src[si+2]<=3){
            temp[di++]= 0; si++;
            temp[di++]= 0; si++;
            temp[di++]= 3;
            temp[di++]= src[si++];
        }
        else
            temp[di++]= src[si++];
    }
    memcpy(dst+1, temp, length+escape_count);

    assert(di == length+escape_count);

    return di + 1;
}

/**
 * write 1,10,100,1000,... for alignment, yes its exactly inverse to mpeg4
 */
static void encode_rbsp_trailing(PutBitContext *pb){
    int length;
    put_bits(pb, 1, 1);
    length= (-put_bits_count(pb))&7;
    if(length) put_bits(pb, length, 0);
}
#endif
1159
/**
 * identifies the exact end of the bitstream
 * @return the length of the trailing, or 0 if damaged
 */
static int decode_rbsp_trailing(uint8_t *src){
    const int byte= *src;
    int pos;

    tprintf("rbsp trailing %X\n", byte);

    /* position (1..8) of the lowest set bit = number of trailing bits */
    for(pos=1; pos<9; pos++){
        if((byte >> (pos-1)) & 1)
            return pos;
    }
    return 0;
}
1176
/**
 * idct transforms the 16 dc values and dequantizes them.
 * @param qp quantization parameter
 */
static void h264_luma_dc_dequant_idct_c(DCTELEM *block, int qp){
    const int qmul= dequant_coeff[qp][0];
#define stride 16
    int i;
    int temp[16]; //FIXME check if this is a good idea
    /* DC positions of the 16 4x4 luma blocks inside the MB coefficient array */
    static const int x_offset[4]={0, 1*stride, 4* stride, 5*stride};
    static const int y_offset[4]={0, 2*stride, 8* stride, 10*stride};

//memset(block, 64, 2*256);
//return;
    /* first butterfly pass over the gathered 4x4 DC grid */
    for(i=0; i<4; i++){
        const int offset= y_offset[i];
        const int z0= block[offset+stride*0] + block[offset+stride*4];
        const int z1= block[offset+stride*0] - block[offset+stride*4];
        const int z2= block[offset+stride*1] - block[offset+stride*5];
        const int z3= block[offset+stride*1] + block[offset+stride*5];

        temp[4*i+0]= z0+z3;
        temp[4*i+1]= z1+z2;
        temp[4*i+2]= z1-z2;
        temp[4*i+3]= z0-z3;
    }

    /* second pass + dequant, scattering results back to the DC positions */
    for(i=0; i<4; i++){
        const int offset= x_offset[i];
        const int z0= temp[4*0+i] + temp[4*2+i];
        const int z1= temp[4*0+i] - temp[4*2+i];
        const int z2= temp[4*1+i] - temp[4*3+i];
        const int z3= temp[4*1+i] + temp[4*3+i];

        block[stride*0 +offset]= ((z0 + z3)*qmul + 2)>>2; //FIXME think about merging this into decode_resdual
        block[stride*2 +offset]= ((z1 + z2)*qmul + 2)>>2;
        block[stride*8 +offset]= ((z1 - z2)*qmul + 2)>>2;
        block[stride*10+offset]= ((z0 - z3)*qmul + 2)>>2;
    }
}
1217
/* Forward luma DC transform (encoder side); currently compiled out. */
#if 0
/**
 * dct tranforms the 16 dc values.
 * @param qp quantization parameter ??? FIXME
 */
static void h264_luma_dc_dct_c(DCTELEM *block/*, int qp*/){
//    const int qmul= dequant_coeff[qp][0];
    int i;
    int temp[16]; //FIXME check if this is a good idea
    static const int x_offset[4]={0, 1*stride, 4* stride, 5*stride};
    static const int y_offset[4]={0, 2*stride, 8* stride, 10*stride};

    for(i=0; i<4; i++){
        const int offset= y_offset[i];
        const int z0= block[offset+stride*0] + block[offset+stride*4];
        const int z1= block[offset+stride*0] - block[offset+stride*4];
        const int z2= block[offset+stride*1] - block[offset+stride*5];
        const int z3= block[offset+stride*1] + block[offset+stride*5];

        temp[4*i+0]= z0+z3;
        temp[4*i+1]= z1+z2;
        temp[4*i+2]= z1-z2;
        temp[4*i+3]= z0-z3;
    }

    for(i=0; i<4; i++){
        const int offset= x_offset[i];
        const int z0= temp[4*0+i] + temp[4*2+i];
        const int z1= temp[4*0+i] - temp[4*2+i];
        const int z2= temp[4*1+i] - temp[4*3+i];
        const int z3= temp[4*1+i] + temp[4*3+i];

        block[stride*0 +offset]= (z0 + z3)>>1;
        block[stride*2 +offset]= (z1 + z2)>>1;
        block[stride*8 +offset]= (z1 - z2)>>1;
        block[stride*10+offset]= (z0 - z3)>>1;
    }
}
#endif
1257
1258 #undef xStride
1259 #undef stride
1260
1261 static void chroma_dc_dequant_idct_c(DCTELEM *block, int qp){
1262 const int qmul= dequant_coeff[qp][0];
1263 const int stride= 16*2;
1264 const int xStride= 16;
1265 int a,b,c,d,e;
1266
1267 a= block[stride*0 + xStride*0];
1268 b= block[stride*0 + xStride*1];
1269 c= block[stride*1 + xStride*0];
1270 d= block[stride*1 + xStride*1];
1271
1272 e= a-b;
1273 a= a+b;
1274 b= c-d;
1275 c= c+d;
1276
1277 block[stride*0 + xStride*0]= ((a+c)*qmul + 0)>>1;
1278 block[stride*0 + xStride*1]= ((e+b)*qmul + 0)>>1;
1279 block[stride*1 + xStride*0]= ((a-c)*qmul + 0)>>1;
1280 block[stride*1 + xStride*1]= ((e-b)*qmul + 0)>>1;
1281 }
1282
/* Forward 2x2 chroma DC transform (encoder side); currently compiled out. */
#if 0
static void chroma_dc_dct_c(DCTELEM *block){
    const int stride= 16*2;
    const int xStride= 16;
    int a,b,c,d,e;

    a= block[stride*0 + xStride*0];
    b= block[stride*0 + xStride*1];
    c= block[stride*1 + xStride*0];
    d= block[stride*1 + xStride*1];

    e= a-b;
    a= a+b;
    b= c-d;
    c= c+d;

    block[stride*0 + xStride*0]= (a+c);
    block[stride*0 + xStride*1]= (e+b);
    block[stride*1 + xStride*0]= (a-c);
    block[stride*1 + xStride*1]= (e-b);
}
#endif
1305
/**
 * gets the chroma qp: maps the luma qscale (plus the PPS
 * chroma_qp_index_offset, clipped to 0..51) through the chroma_qp table.
 */
static inline int get_chroma_qp(H264Context *h, int qscale){

    return chroma_qp[clip(qscale + h->pps.chroma_qp_index_offset, 0, 51)];
}
1313
1314
/**
 * Inverse-transforms a 4x4 residual block (two butterfly passes with a
 * final >>6) and adds the result to dst, clipping through the crop table.
 */
static void h264_add_idct_c(uint8_t *dst, DCTELEM *block, int stride){
    int i;
    uint8_t *cm = cropTbl + MAX_NEG_CROP;

    block[0] += 32;   /* rounding offset for the final >>6 */

    /* horizontal pass, in place */
    for(i=0; i<4; i++){
        const int z0= block[0 + 4*i] + block[2 + 4*i];
        const int z1= block[0 + 4*i] - block[2 + 4*i];
        const int z2= (block[1 + 4*i]>>1) - block[3 + 4*i];
        const int z3= block[1 + 4*i] + (block[3 + 4*i]>>1);

        block[0 + 4*i]= z0 + z3;
        block[1 + 4*i]= z1 + z2;
        block[2 + 4*i]= z1 - z2;
        block[3 + 4*i]= z0 - z3;
    }

    /* vertical pass, adding the clipped result to the destination */
    for(i=0; i<4; i++){
        const int z0= block[i + 4*0] + block[i + 4*2];
        const int z1= block[i + 4*0] - block[i + 4*2];
        const int z2= (block[i + 4*1]>>1) - block[i + 4*3];
        const int z3= block[i + 4*1] + (block[i + 4*3]>>1);

        dst[i + 0*stride]= cm[ dst[i + 0*stride] + ((z0 + z3) >> 6) ];
        dst[i + 1*stride]= cm[ dst[i + 1*stride] + ((z1 + z2) >> 6) ];
        dst[i + 2*stride]= cm[ dst[i + 2*stride] + ((z1 - z2) >> 6) ];
        dst[i + 3*stride]= cm[ dst[i + 3*stride] + ((z0 - z3) >> 6) ];
    }
}
1348
/* Forward 4x4 DCT of the difference of two sources (encoder side);
 * currently compiled out. */
#if 0
static void h264_diff_dct_c(DCTELEM *block, uint8_t *src1, uint8_t *src2, int stride){
    int i;
    //FIXME try int temp instead of block

    for(i=0; i<4; i++){
        const int d0= src1[0 + i*stride] - src2[0 + i*stride];
        const int d1= src1[1 + i*stride] - src2[1 + i*stride];
        const int d2= src1[2 + i*stride] - src2[2 + i*stride];
        const int d3= src1[3 + i*stride] - src2[3 + i*stride];
        const int z0= d0 + d3;
        const int z3= d0 - d3;
        const int z1= d1 + d2;
        const int z2= d1 - d2;

        block[0 + 4*i]= z0 + z1;
        block[1 + 4*i]= 2*z3 + z2;
        block[2 + 4*i]= z0 - z1;
        block[3 + 4*i]= z3 - 2*z2;
    }

    for(i=0; i<4; i++){
        const int z0= block[0*4 + i] + block[3*4 + i];
        const int z3= block[0*4 + i] - block[3*4 + i];
        const int z1= block[1*4 + i] + block[2*4 + i];
        const int z2= block[1*4 + i] - block[2*4 + i];

        block[0*4 + i]= z0 + z1;
        block[1*4 + i]= 2*z3 + z2;
        block[2*4 + i]= z0 - z1;
        block[3*4 + i]= z3 - 2*z2;
    }
}
#endif
1383
//FIXME need to check that this doesnt overflow signed 32 bit for low qp, iam not sure, its very close
//FIXME check that gcc inlines this (and optimizes intra & seperate_dc stuff away)
/**
 * Quantizes a 4x4 block in place (encoder side).
 * @param scantable zigzag scan order of the 16 coefficients
 * @param seperate_dc if set, block[0] is quantized with DC-specific bias/shift
 * @return index (in scan order) of the last non-zero coefficient, or -1
 */
static inline int quantize_c(DCTELEM *block, uint8_t *scantable, int qscale, int intra, int seperate_dc){
    int i;
    const int * const quant_table= quant_coeff[qscale];
    const int bias= intra ? (1<<QUANT_SHIFT)/3 : (1<<QUANT_SHIFT)/6;  /* intra gets the larger dead-zone bias */
    const unsigned int threshold1= (1<<QUANT_SHIFT) - bias - 1;
    const unsigned int threshold2= (threshold1<<1);
    int last_non_zero;

    if(seperate_dc){
        if(qscale<=18){
            //avoid overflows
            const int dc_bias= intra ? (1<<(QUANT_SHIFT-2))/3 : (1<<(QUANT_SHIFT-2))/6;
            const unsigned int dc_threshold1= (1<<(QUANT_SHIFT-2)) - dc_bias - 1;
            const unsigned int dc_threshold2= (dc_threshold1<<1);

            int level= block[0]*quant_coeff[qscale+18][0];
            if(((unsigned)(level+dc_threshold1))>dc_threshold2){
                if(level>0){
                    level= (dc_bias + level)>>(QUANT_SHIFT-2);
                    block[0]= level;
                }else{
                    level= (dc_bias - level)>>(QUANT_SHIFT-2);
                    block[0]= -level;
                }
//                last_non_zero = i;
            }else{
                block[0]=0;
            }
        }else{
            const int dc_bias= intra ? (1<<(QUANT_SHIFT+1))/3 : (1<<(QUANT_SHIFT+1))/6;
            const unsigned int dc_threshold1= (1<<(QUANT_SHIFT+1)) - dc_bias - 1;
            const unsigned int dc_threshold2= (dc_threshold1<<1);

            int level= block[0]*quant_table[0];
            if(((unsigned)(level+dc_threshold1))>dc_threshold2){
                if(level>0){
                    level= (dc_bias + level)>>(QUANT_SHIFT+1);
                    block[0]= level;
                }else{
                    level= (dc_bias - level)>>(QUANT_SHIFT+1);
                    block[0]= -level;
                }
//                last_non_zero = i;
            }else{
                block[0]=0;
            }
        }
        last_non_zero= 0;
        i=1;   /* AC loop below starts after the already-handled DC */
    }else{
        last_non_zero= -1;
        i=0;
    }

    for(; i<16; i++){
        const int j= scantable[i];
        int level= block[j]*quant_table[j];

//        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
//           || bias-level >= (1<<(QMAT_SHIFT - 3))){
        /* single unsigned compare covers both the positive and negative dead zone */
        if(((unsigned)(level+threshold1))>threshold2){
            if(level>0){
                level= (bias + level)>>QUANT_SHIFT;
                block[j]= level;
            }else{
                level= (bias - level)>>QUANT_SHIFT;
                block[j]= -level;
            }
            last_non_zero = i;
        }else{
            block[j]=0;
        }
    }

    return last_non_zero;
}
1462
/* 4x4 vertical prediction: replicate the row above into all four rows. */
static void pred4x4_vertical_c(uint8_t *src, uint8_t *topright, int stride){
    int x, y;
    const uint8_t * const top= src - stride;

    for(y=0; y<4; y++)
        for(x=0; x<4; x++)
            src[x + y*stride]= top[x];
}
1470
/* 4x4 horizontal prediction: replicate each left neighbour across its row. */
static void pred4x4_horizontal_c(uint8_t *src, uint8_t *topright, int stride){
    int x, y;

    for(y=0; y<4; y++){
        const uint8_t left= src[y*stride - 1];
        for(x=0; x<4; x++)
            src[x + y*stride]= left;
    }
}
1477
/* 4x4 DC prediction: fill with the rounded mean of the 4 top and 4 left
 * neighbours. */
static void pred4x4_dc_c(uint8_t *src, uint8_t *topright, int stride){
    int x, y, sum= 4, fill;

    for(x=0; x<4; x++)
        sum+= src[x - stride] + src[x*stride - 1];
    fill= sum >> 3;

    for(y=0; y<4; y++)
        for(x=0; x<4; x++)
            src[x + y*stride]= fill;
}
1487
/* 4x4 left-DC prediction: fill with the rounded mean of the 4 left
 * neighbours (top row unavailable). */
static void pred4x4_left_dc_c(uint8_t *src, uint8_t *topright, int stride){
    int x, y, sum= 2, fill;

    for(y=0; y<4; y++)
        sum+= src[y*stride - 1];
    fill= sum >> 2;

    for(y=0; y<4; y++)
        for(x=0; x<4; x++)
            src[x + y*stride]= fill;
}
1496
/* 4x4 top-DC prediction: fill with the rounded mean of the 4 top
 * neighbours (left column unavailable). */
static void pred4x4_top_dc_c(uint8_t *src, uint8_t *topright, int stride){
    int x, y, sum= 2, fill;

    for(x=0; x<4; x++)
        sum+= src[x - stride];
    fill= sum >> 2;

    for(y=0; y<4; y++)
        for(x=0; x<4; x++)
            src[x + y*stride]= fill;
}
1505
/* 4x4 DC prediction with no neighbours available: fill with mid-grey (128). */
static void pred4x4_128_dc_c(uint8_t *src, uint8_t *topright, int stride){
    int x, y;

    for(y=0; y<4; y++)
        for(x=0; x<4; x++)
            src[x + y*stride]= 128;
}
1512
1513
/* Loads the 4 pixels above-right of the current 4x4 block into t4..t7. */
#define LOAD_TOP_RIGHT_EDGE\
    const int t4= topright[0];\
    const int t5= topright[1];\
    const int t6= topright[2];\
    const int t7= topright[3];\

/* Loads the 4 pixels left of the current 4x4 block into l0..l3. */
#define LOAD_LEFT_EDGE\
    const int l0= src[-1+0*stride];\
    const int l1= src[-1+1*stride];\
    const int l2= src[-1+2*stride];\
    const int l3= src[-1+3*stride];\

/* Loads the 4 pixels above the current 4x4 block into t0..t3. */
#define LOAD_TOP_EDGE\
    const int t0= src[ 0-1*stride];\
    const int t1= src[ 1-1*stride];\
    const int t2= src[ 2-1*stride];\
    const int t3= src[ 3-1*stride];\

/* 4x4 diagonal down-right prediction: one filtered value per diagonal,
 * indexed by k = x - y + 3. */
static void pred4x4_down_right_c(uint8_t *src, uint8_t *topright, int stride){
    const int lt= src[-1 - stride];
    const int t0= src[0 - stride];
    const int t1= src[1 - stride];
    const int t2= src[2 - stride];
    const int t3= src[3 - stride];
    const int l0= src[0*stride - 1];
    const int l1= src[1*stride - 1];
    const int l2= src[2*stride - 1];
    const int l3= src[3*stride - 1];
    const int diag[7]= {
        (l3 + 2*l2 + l1 + 2)>>2,
        (l2 + 2*l1 + l0 + 2)>>2,
        (l1 + 2*l0 + lt + 2)>>2,
        (l0 + 2*lt + t0 + 2)>>2,
        (lt + 2*t0 + t1 + 2)>>2,
        (t0 + 2*t1 + t2 + 2)>>2,
        (t1 + 2*t2 + t3 + 2)>>2,
    };
    int x, y;

    for(y=0; y<4; y++)
        for(x=0; x<4; x++)
            src[x + y*stride]= diag[x - y + 3];
}
1554
/* 4x4 diagonal down-left prediction: one filtered value per anti-diagonal,
 * indexed by k = x + y. */
static void pred4x4_down_left_c(uint8_t *src, uint8_t *topright, int stride){
    const int t0= src[0 - stride];
    const int t1= src[1 - stride];
    const int t2= src[2 - stride];
    const int t3= src[3 - stride];
    const int t4= topright[0];
    const int t5= topright[1];
    const int t6= topright[2];
    const int t7= topright[3];
    const int diag[7]= {
        (t0 + t2 + 2*t1 + 2)>>2,
        (t1 + t3 + 2*t2 + 2)>>2,
        (t2 + t4 + 2*t3 + 2)>>2,
        (t3 + t5 + 2*t4 + 2)>>2,
        (t4 + t6 + 2*t5 + 2)>>2,
        (t5 + t7 + 2*t6 + 2)>>2,
        (t6 + 3*t7 + 2)>>2,
    };
    int x, y;

    for(y=0; y<4; y++)
        for(x=0; x<4; x++)
            src[x + y*stride]= diag[x + y];
}
1577
/* 4x4 vertical-right prediction: rows 0/2 share half-pel averages v*,
 * rows 1/3 share quarter-pel averages w*, each lower row shifted right. */
static void pred4x4_vertical_right_c(uint8_t *src, uint8_t *topright, int stride){
    const int lt= src[-1 - stride];
    const int t0= src[0 - stride];
    const int t1= src[1 - stride];
    const int t2= src[2 - stride];
    const int t3= src[3 - stride];
    const int l0= src[0*stride - 1];
    const int l1= src[1*stride - 1];
    const int l2= src[2*stride - 1];
    const int v0= (lt + t0 + 1)>>1;
    const int v1= (t0 + t1 + 1)>>1;
    const int v2= (t1 + t2 + 1)>>1;
    const int v3= (t2 + t3 + 1)>>1;
    const int w0= (l0 + 2*lt + t0 + 2)>>2;
    const int w1= (lt + 2*t0 + t1 + 2)>>2;
    const int w2= (t0 + 2*t1 + t2 + 2)>>2;
    const int w3= (t1 + 2*t2 + t3 + 2)>>2;

    src[0+0*stride]= v0;
    src[1+0*stride]= v1;
    src[2+0*stride]= v2;
    src[3+0*stride]= v3;
    src[0+1*stride]= w0;
    src[1+1*stride]= w1;
    src[2+1*stride]= w2;
    src[3+1*stride]= w3;
    src[0+2*stride]= (lt + 2*l0 + l1 + 2)>>2;
    src[1+2*stride]= v0;
    src[2+2*stride]= v1;
    src[3+2*stride]= v2;
    src[0+3*stride]= (l0 + 2*l1 + l2 + 2)>>2;
    src[1+3*stride]= w0;
    src[2+3*stride]= w1;
    src[3+3*stride]= w2;
}
1601
/* 4x4 vertical-left prediction: even rows use half-pel averages a*,
 * odd rows use quarter-pel averages b*, each lower pair shifted left. */
static void pred4x4_vertical_left_c(uint8_t *src, uint8_t *topright, int stride){
    const int t0= src[0 - stride];
    const int t1= src[1 - stride];
    const int t2= src[2 - stride];
    const int t3= src[3 - stride];
    const int t4= topright[0];
    const int t5= topright[1];
    const int t6= topright[2];
    const int a0= (t0 + t1 + 1)>>1;
    const int a1= (t1 + t2 + 1)>>1;
    const int a2= (t2 + t3 + 1)>>1;
    const int a3= (t3 + t4 + 1)>>1;
    const int a4= (t4 + t5 + 1)>>1;
    const int b0= (t0 + 2*t1 + t2 + 2)>>2;
    const int b1= (t1 + 2*t2 + t3 + 2)>>2;
    const int b2= (t2 + 2*t3 + t4 + 2)>>2;
    const int b3= (t3 + 2*t4 + t5 + 2)>>2;
    const int b4= (t4 + 2*t5 + t6 + 2)>>2;

    src[0+0*stride]= a0;
    src[1+0*stride]= a1;
    src[2+0*stride]= a2;
    src[3+0*stride]= a3;
    src[0+1*stride]= b0;
    src[1+1*stride]= b1;
    src[2+1*stride]= b2;
    src[3+1*stride]= b3;
    src[0+2*stride]= a1;
    src[1+2*stride]= a2;
    src[2+2*stride]= a3;
    src[3+2*stride]= a4;
    src[0+3*stride]= b1;
    src[1+3*stride]= b2;
    src[2+3*stride]= b3;
    src[3+3*stride]= b4;
}
1624
/* 4x4 horizontal-up prediction: interpolated values z0..z5 walk down the
 * left edge; everything past the last left sample saturates to l3. */
static void pred4x4_horizontal_up_c(uint8_t *src, uint8_t *topright, int stride){
    const int l0= src[0*stride - 1];
    const int l1= src[1*stride - 1];
    const int l2= src[2*stride - 1];
    const int l3= src[3*stride - 1];
    const int z0= (l0 + l1 + 1)>>1;
    const int z1= (l0 + 2*l1 + l2 + 2)>>2;
    const int z2= (l1 + l2 + 1)>>1;
    const int z3= (l1 + 2*l2 + l3 + 2)>>2;
    const int z4= (l2 + l3 + 1)>>1;
    const int z5= (l2 + 2*l3 + l3 + 2)>>2;

    src[0+0*stride]= z0;
    src[1+0*stride]= z1;
    src[2+0*stride]= z2;
    src[3+0*stride]= z3;
    src[0+1*stride]= z2;
    src[1+1*stride]= z3;
    src[2+1*stride]= z4;
    src[3+1*stride]= z5;
    src[0+2*stride]= z4;
    src[1+2*stride]= z5;
    src[2+2*stride]= l3;
    src[3+2*stride]= l3;
    src[0+3*stride]= l3;
    src[1+3*stride]= l3;
    src[2+3*stride]= l3;
    src[3+3*stride]= l3;
}
1645
/* 4x4 horizontal-down prediction: each row y holds the pair (p,q) for its
 * left sample followed by the previous row's pair shifted right by two. */
static void pred4x4_horizontal_down_c(uint8_t *src, uint8_t *topright, int stride){
    const int lt= src[-1 - stride];
    const int t0= src[0 - stride];
    const int t1= src[1 - stride];
    const int t2= src[2 - stride];
    const int l0= src[0*stride - 1];
    const int l1= src[1*stride - 1];
    const int l2= src[2*stride - 1];
    const int l3= src[3*stride - 1];
    const int p0= (lt + l0 + 1)>>1;
    const int q0= (l0 + 2*lt + t0 + 2)>>2;
    const int u1= (lt + 2*t0 + t1 + 2)>>2;
    const int u2= (t0 + 2*t1 + t2 + 2)>>2;
    const int p1= (l0 + l1 + 1)>>1;
    const int q1= (lt + 2*l0 + l1 + 2)>>2;
    const int p2= (l1 + l2 + 1)>>1;
    const int q2= (l0 + 2*l1 + l2 + 2)>>2;
    const int p3= (l2 + l3 + 1)>>1;
    const int q3= (l1 + 2*l2 + l3 + 2)>>2;

    src[0+0*stride]= p0;
    src[1+0*stride]= q0;
    src[2+0*stride]= u1;
    src[3+0*stride]= u2;
    src[0+1*stride]= p1;
    src[1+1*stride]= q1;
    src[2+1*stride]= p0;
    src[3+1*stride]= q0;
    src[0+2*stride]= p2;
    src[1+2*stride]= q2;
    src[2+2*stride]= p1;
    src[3+2*stride]= q1;
    src[0+3*stride]= p3;
    src[1+3*stride]= q3;
    src[2+3*stride]= p2;
    src[3+3*stride]= q2;
}
1669
/* 16x16 vertical prediction: replicate the row above into all 16 rows. */
static void pred16x16_vertical_c(uint8_t *src, int stride){
    int x, y;
    const uint8_t * const top= src - stride;

    for(y=0; y<16; y++)
        for(x=0; x<16; x++)
            src[x + y*stride]= top[x];
}
1684
/* 16x16 horizontal prediction: replicate each left neighbour across its row. */
static void pred16x16_horizontal_c(uint8_t *src, int stride){
    int x, y;

    for(y=0; y<16; y++){
        const uint8_t left= src[y*stride - 1];
        for(x=0; x<16; x++)
            src[x + y*stride]= left;
    }
}
1695
/* 16x16 DC prediction: fill with the rounded mean of the 16 left and 16
 * top neighbours. */
static void pred16x16_dc_c(uint8_t *src, int stride){
    int x, y, sum= 0, fill;

    for(y=0; y<16; y++)
        sum+= src[y*stride - 1];
    for(x=0; x<16; x++)
        sum+= src[x - stride];
    fill= (sum + 16) >> 5;

    for(y=0; y<16; y++)
        for(x=0; x<16; x++)
            src[x + y*stride]= fill;
}
1716
/* 16x16 left-DC prediction: fill with the rounded mean of the 16 left
 * neighbours only. */
static void pred16x16_left_dc_c(uint8_t *src, int stride){
    int x, y, sum= 0, fill;

    for(y=0; y<16; y++)
        sum+= src[y*stride - 1];
    fill= (sum + 8) >> 4;

    for(y=0; y<16; y++)
        for(x=0; x<16; x++)
            src[x + y*stride]= fill;
}
1733
/* 16x16 top-DC prediction: fill with the rounded mean of the 16 top
 * neighbours only. */
static void pred16x16_top_dc_c(uint8_t *src, int stride){
    int x, y, sum= 0, fill;

    for(x=0; x<16; x++)
        sum+= src[x - stride];
    fill= (sum + 8) >> 4;

    for(y=0; y<16; y++)
        for(x=0; x<16; x++)
            src[x + y*stride]= fill;
}
1749
/* 16x16 DC prediction with no neighbours available: fill with mid-grey. */
static void pred16x16_128_dc_c(uint8_t *src, int stride){
    int x, y;

    for(y=0; y<16; y++)
        for(x=0; x<16; x++)
            src[x + y*stride]= 128;
}
1760
/* 16x16 plane prediction shared by the H.264 and SVQ3 decoders; the svq3
 * flag selects the SVQ3 gradient scaling (with swapped H/V) instead of the
 * H.264 rounding. */
static inline void pred16x16_plane_compat_c(uint8_t *src, int stride, const int svq3){
    int i, j, k;
    int a;
    uint8_t *cm = cropTbl + MAX_NEG_CROP;
    const uint8_t * const src0 = src+7-stride;   /* centre of the top edge */
    const uint8_t *src1 = src+8*stride-1;        /* walks down the left edge */
    const uint8_t *src2 = src1-2*stride;         // == src+6*stride-1;  walks up
    int H = src0[1] - src0[-1];                  /* weighted horizontal gradient */
    int V = src1[0] - src2[ 0];                  /* weighted vertical gradient */
    for(k=2; k<=8; ++k) {
        src1 += stride; src2 -= stride;
        H += k*(src0[k] - src0[-k]);
        V += k*(src1[0] - src2[ 0]);
    }
    if(svq3){
        H = ( 5*(H/4) ) / 16;
        V = ( 5*(V/4) ) / 16;

        /* required for 100% accuracy */
        i = H; H = V; V = i;
    }else{
        H = ( 5*H+32 ) >> 6;
        V = ( 5*V+32 ) >> 6;
    }

    /* plane offset so that the predicted value at each pixel is
       a/32 + x*H/32 + y*V/32, clipped to 0..255 via the crop table */
    a = 16*(src1[0] + src2[16] + 1) - 7*(V+H);
    for(j=16; j>0; --j) {
        int b = a;
        a += V;
        for(i=-16; i<0; i+=4) {
            src[16+i] = cm[ (b    ) >> 5 ];
            src[17+i] = cm[ (b+  H) >> 5 ];
            src[18+i] = cm[ (b+2*H) >> 5 ];
            src[19+i] = cm[ (b+3*H) >> 5 ];
            b += 4*H;
        }
        src += stride;
    }
}
1800
/* H.264-flavoured 16x16 plane prediction: pred16x16_plane_compat_c with
 * the SVQ3 variant disabled. */
static void pred16x16_plane_c(uint8_t *src, int stride){
    pred16x16_plane_compat_c(src, stride, 0);
}
1804
/* 8x8 vertical prediction: replicate the row above into all 8 rows. */
static void pred8x8_vertical_c(uint8_t *src, int stride){
    int x, y;
    const uint8_t * const top= src - stride;

    for(y=0; y<8; y++)
        for(x=0; x<8; x++)
            src[x + y*stride]= top[x];
}
1815
/* 8x8 horizontal prediction: replicate each left neighbour across its row. */
static void pred8x8_horizontal_c(uint8_t *src, int stride){
    int x, y;

    for(y=0; y<8; y++){
        const uint8_t left= src[y*stride - 1];
        for(x=0; x<8; x++)
            src[x + y*stride]= left;
    }
}
1824
/* 8x8 DC prediction with no neighbours available: fill with mid-grey (128). */
static void pred8x8_128_dc_c(uint8_t *src, int stride){
    int i;

    /* The original wrote rows 0-3 and 4-7 in two identical loops (mirroring
     * the quadrant structure of the other 8x8 DC modes); a single loop over
     * all 8 rows produces the same stores. */
    for(i=0; i<8; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]= 0x01010101U*128U;
    }
}
1837
/* 8x8 left-DC prediction: separate DC for the upper and lower half, each
 * averaged from its own 4 left neighbours. */
static void pred8x8_left_dc_c(uint8_t *src, int stride){
    int x, y;
    int sum_top= 0, sum_bot= 0;
    int fill_top, fill_bot;

    for(y=0; y<4; y++){
        sum_top+= src[ y   *stride - 1];
        sum_bot+= src[(y+4)*stride - 1];
    }
    fill_top= (sum_top + 2) >> 2;
    fill_bot= (sum_bot + 2) >> 2;

    for(y=0; y<4; y++)
        for(x=0; x<8; x++){
            src[x +  y   *stride]= fill_top;
            src[x + (y+4)*stride]= fill_bot;
        }
}
1859
/* 8x8 top-DC prediction: separate DC for the left and right half, each
 * averaged from its own 4 top neighbours. */
static void pred8x8_top_dc_c(uint8_t *src, int stride){
    int x, y;
    int sum_l= 0, sum_r= 0;
    int fill_l, fill_r;

    for(x=0; x<4; x++){
        sum_l+= src[x     - stride];
        sum_r+= src[x + 4 - stride];
    }
    fill_l= (sum_l + 2) >> 2;
    fill_r= (sum_r + 2) >> 2;

    for(y=0; y<8; y++)
        for(x=0; x<4; x++){
            src[x     + y*stride]= fill_l;
            src[x + 4 + y*stride]= fill_r;
        }
}
1881
1882
/* 8x8 DC prediction: one DC per 4x4 quadrant. Top-left averages its own
 * top and left neighbours; top-right and bottom-left use only their own
 * edge; bottom-right averages the other two quadrants' edge sums. */
static void pred8x8_dc_c(uint8_t *src, int stride){
    int x, y;
    int sum_tl= 0, sum_tr= 0, sum_bl= 0;
    int dc_tl, dc_tr, dc_bl, dc_br;

    for(y=0; y<4; y++){
        sum_tl+= src[y*stride - 1] + src[y - stride];
        sum_tr+= src[4 + y - stride];
        sum_bl+= src[(y+4)*stride - 1];
    }
    dc_br= (sum_tr + sum_bl + 4) >> 3;
    dc_tl= (sum_tl + 4) >> 3;
    dc_tr= (sum_tr + 2) >> 2;
    dc_bl= (sum_bl + 2) >> 2;

    for(y=0; y<4; y++)
        for(x=0; x<4; x++){
            src[x     +  y   *stride]= dc_tl;
            src[x + 4 +  y   *stride]= dc_tr;
            src[x     + (y+4)*stride]= dc_bl;
            src[x + 4 + (y+4)*stride]= dc_br;
        }
}
1907
/* 8x8 plane prediction: fits a gradient from the top and left edges and
 * fills the block with the clipped plane a/32 + x*H/32 + y*V/32. */
static void pred8x8_plane_c(uint8_t *src, int stride){
    int j, k;
    int a;
    uint8_t *cm = cropTbl + MAX_NEG_CROP;
    const uint8_t * const src0 = src+3-stride;   /* centre of the top edge */
    const uint8_t *src1 = src+4*stride-1;        /* walks down the left edge */
    const uint8_t *src2 = src1-2*stride;         // == src+2*stride-1;  walks up
    int H = src0[1] - src0[-1];                  /* weighted horizontal gradient */
    int V = src1[0] - src2[ 0];                  /* weighted vertical gradient */
    for(k=2; k<=4; ++k) {
        src1 += stride; src2 -= stride;
        H += k*(src0[k] - src0[-k]);
        V += k*(src1[0] - src2[ 0]);
    }
    H = ( 17*H+16 ) >> 5;
    V = ( 17*V+16 ) >> 5;

    a = 16*(src1[0] + src2[8]+1) - 3*(V+H);
    for(j=8; j>0; --j) {
        int b = a;
        a += V;
        src[0] = cm[ (b    ) >> 5 ];
        src[1] = cm[ (b+  H) >> 5 ];
        src[2] = cm[ (b+2*H) >> 5 ];
        src[3] = cm[ (b+3*H) >> 5 ];
        src[4] = cm[ (b+4*H) >> 5 ];
        src[5] = cm[ (b+5*H) >> 5 ];
        src[6] = cm[ (b+6*H) >> 5 ];
        src[7] = cm[ (b+7*H) >> 5 ];
        src += stride;
    }
}
1940
/* Motion compensation for one partition in one direction (list): fetches
 * the quarter-pel luma and eighth-pel chroma prediction from pic into
 * dest_*, using edge emulation when the MV points outside the padded frame. */
static inline void mc_dir_part(H264Context *h, Picture *pic, int n, int square, int chroma_height, int delta, int list,
                           uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                           int src_x_offset, int src_y_offset,
                           qpel_mc_func *qpix_op, h264_chroma_mc_func chroma_op){
    MpegEncContext * const s = &h->s;
    const int mx= h->mv_cache[list][ scan8[n] ][0] + src_x_offset*8;  /* 1/4-pel x (1/8-pel for chroma) */
    const int my= h->mv_cache[list][ scan8[n] ][1] + src_y_offset*8;
    const int luma_xy= (mx&3) + ((my&3)<<2);   /* selects one of the 16 qpel filters */
    uint8_t * src_y = pic->data[0] + (mx>>2) + (my>>2)*s->linesize;
    uint8_t * src_cb= pic->data[1] + (mx>>3) + (my>>3)*s->uvlinesize;
    uint8_t * src_cr= pic->data[2] + (mx>>3) + (my>>3)*s->uvlinesize;
    int extra_width= (s->flags&CODEC_FLAG_EMU_EDGE) ? 0 : 16; //FIXME increase edge?, IMHO not worth it
    int extra_height= extra_width;
    int emu=0;
    const int full_mx= mx>>2;
    const int full_my= my>>2;

    assert(pic->data[0]);

    /* sub-pel positions need 3 extra pixels of margin for the 6-tap filter */
    if(mx&7) extra_width -= 3;
    if(my&7) extra_height -= 3;

    if(   full_mx < 0-extra_width
       || full_my < 0-extra_height
       || full_mx + 16/*FIXME*/ > s->width + extra_width
       || full_my + 16/*FIXME*/ > s->height + extra_height){
        /* MV reaches outside the padded picture: build an edge-replicated
         * copy in the emu buffer and read the luma from there */
        ff_emulated_edge_mc(s->edge_emu_buffer, src_y - 2 - 2*s->linesize, s->linesize, 16+5, 16+5/*FIXME*/, full_mx-2, full_my-2, s->width, s->height);
        src_y= s->edge_emu_buffer + 2 + 2*s->linesize;
        emu=1;
    }

    qpix_op[luma_xy](dest_y, src_y, s->linesize); //FIXME try variable height perhaps?
    if(!square){
        qpix_op[luma_xy](dest_y + delta, src_y + delta, s->linesize);
    }

    if(s->flags&CODEC_FLAG_GRAY) return;   /* luma-only decoding requested */

    if(emu){
        ff_emulated_edge_mc(s->edge_emu_buffer, src_cb, s->uvlinesize, 9, 9/*FIXME*/, (mx>>3), (my>>3), s->width>>1, s->height>>1);
        src_cb= s->edge_emu_buffer;
    }
    chroma_op(dest_cb, src_cb, s->uvlinesize, chroma_height, mx&7, my&7);

    if(emu){
        ff_emulated_edge_mc(s->edge_emu_buffer, src_cr, s->uvlinesize, 9, 9/*FIXME*/, (mx>>3), (my>>3), s->width>>1, s->height>>1);
        src_cr= s->edge_emu_buffer;
    }
    chroma_op(dest_cr, src_cr, s->uvlinesize, chroma_height, mx&7, my&7);
}
1991
/**
 * Motion compensates one macroblock partition, doing list 0 and/or list 1
 * prediction.  The first used list is written with the put variants; if the
 * other list is also used it is blended on top with the avg variants
 * (bipredictive average).
 *
 * @param x_offset,y_offset partition position inside the MB, in chroma pixels
 * @param list0,list1       whether the partition predicts from list 0 / list 1
 */
static inline void mc_part(H264Context *h, int n, int square, int chroma_height, int delta,
                           uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                           int x_offset, int y_offset,
                           qpel_mc_func *qpix_put, h264_chroma_mc_func chroma_put,
                           qpel_mc_func *qpix_avg, h264_chroma_mc_func chroma_avg,
                           int list0, int list1){
    MpegEncContext * const s = &h->s;
    qpel_mc_func *qpix_op= qpix_put;
    h264_chroma_mc_func chroma_op= chroma_put;

    // offsets are in chroma pixels; luma uses twice the offset
    dest_y  += 2*x_offset + 2*y_offset*s-> linesize;
    dest_cb +=   x_offset +   y_offset*s->uvlinesize;
    dest_cr +=   x_offset +   y_offset*s->uvlinesize;
    x_offset += 8*s->mb_x;   // convert to picture coordinates (chroma pixels)
    y_offset += 8*s->mb_y;

    if(list0){
        Picture *ref= &h->ref_list[0][ h->ref_cache[0][ scan8[n] ] ];
        mc_dir_part(h, ref, n, square, chroma_height, delta, 0,
                   dest_y, dest_cb, dest_cr, x_offset, y_offset,
                   qpix_op, chroma_op);

        // a following list 1 pass must average with, not overwrite, list 0
        qpix_op= qpix_avg;
        chroma_op= chroma_avg;
    }

    if(list1){
        Picture *ref= &h->ref_list[1][ h->ref_cache[1][ scan8[n] ] ];
        mc_dir_part(h, ref, n, square, chroma_height, delta, 1,
                   dest_y, dest_cb, dest_cr, x_offset, y_offset,
                   qpix_op, chroma_op);
    }
}
2025
/**
 * Performs inter prediction for the whole macroblock, dispatching on the
 * partition layout in mb_type / sub_mb_type.
 *
 * The qpix_* / chroma_* tables are indexed by partition size: index 0 for
 * the largest blocks down to index 2 for the smallest (as selected per
 * branch below).
 */
static void hl_motion(H264Context *h, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                      qpel_mc_func (*qpix_put)[16], h264_chroma_mc_func (*chroma_put),
                      qpel_mc_func (*qpix_avg)[16], h264_chroma_mc_func (*chroma_avg)){
    MpegEncContext * const s = &h->s;
    const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
    const int mb_type= s->current_picture.mb_type[mb_xy];

    assert(IS_INTER(mb_type));

    if(IS_16X16(mb_type)){
        // one 16x16 partition
        mc_part(h, 0, 1, 8, 0, dest_y, dest_cb, dest_cr, 0, 0,
                qpix_put[0], chroma_put[0], qpix_avg[0], chroma_avg[0],
                IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1));
    }else if(IS_16X8(mb_type)){
        // two 16x8 partitions, stacked vertically; delta 8 = second 8-wide half
        mc_part(h, 0, 0, 4, 8, dest_y, dest_cb, dest_cr, 0, 0,
                qpix_put[1], chroma_put[0], qpix_avg[1], chroma_avg[0],
                IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1));
        mc_part(h, 8, 0, 4, 8, dest_y, dest_cb, dest_cr, 0, 4,
                qpix_put[1], chroma_put[0], qpix_avg[1], chroma_avg[0],
                IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1));
    }else if(IS_8X16(mb_type)){
        // two 8x16 partitions, side by side; delta = 8 rows down
        mc_part(h, 0, 0, 8, 8*s->linesize, dest_y, dest_cb, dest_cr, 0, 0,
                qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
                IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1));
        mc_part(h, 4, 0, 8, 8*s->linesize, dest_y, dest_cb, dest_cr, 4, 0,
                qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
                IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1));
    }else{
        int i;

        assert(IS_8X8(mb_type));

        // four 8x8 sub-macroblocks, each possibly split further
        for(i=0; i<4; i++){
            const int sub_mb_type= h->sub_mb_type[i];
            const int n= 4*i;                 // first 4x4 block of this 8x8
            int x_offset= (i&1)<<2;           // offsets in chroma pixels
            int y_offset= (i&2)<<1;

            if(IS_SUB_8X8(sub_mb_type)){
                mc_part(h, n, 1, 4, 0, dest_y, dest_cb, dest_cr, x_offset, y_offset,
                    qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
                    IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
            }else if(IS_SUB_8X4(sub_mb_type)){
                mc_part(h, n  , 0, 2, 4, dest_y, dest_cb, dest_cr, x_offset, y_offset,
                    qpix_put[2], chroma_put[1], qpix_avg[2], chroma_avg[1],
                    IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
                mc_part(h, n+2, 0, 2, 4, dest_y, dest_cb, dest_cr, x_offset, y_offset+2,
                    qpix_put[2], chroma_put[1], qpix_avg[2], chroma_avg[1],
                    IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
            }else if(IS_SUB_4X8(sub_mb_type)){
                mc_part(h, n  , 0, 4, 4*s->linesize, dest_y, dest_cb, dest_cr, x_offset, y_offset,
                    qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
                    IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
                mc_part(h, n+1, 0, 4, 4*s->linesize, dest_y, dest_cb, dest_cr, x_offset+2, y_offset,
                    qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
                    IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
            }else{
                int j;
                assert(IS_SUB_4X4(sub_mb_type));
                for(j=0; j<4; j++){
                    int sub_x_offset= x_offset + 2*(j&1);
                    int sub_y_offset= y_offset +   (j&2);
                    mc_part(h, n+j, 1, 2, 0, dest_y, dest_cb, dest_cr, sub_x_offset, sub_y_offset,
                        qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
                        IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
                }
            }
        }
    }
}
2096
/**
 * Initializes the static CAVLC VLC tables (coeff_token, total_zeros, run).
 * The tables are shared between all decoder instances, so this runs only
 * once (guarded by the static 'done' flag; NOTE(review): not thread safe —
 * concurrent first-time init would race, presumably acceptable here).
 */
static void decode_init_vlc(H264Context *h){
    static int done = 0;

    if (!done) {
        int i;
        done = 1;

        // chroma DC coeff_token: one table
        init_vlc(&chroma_dc_coeff_token_vlc, CHROMA_DC_COEFF_TOKEN_VLC_BITS, 4*5,
                 &chroma_dc_coeff_token_len [0], 1, 1,
                 &chroma_dc_coeff_token_bits[0], 1, 1);

        // luma coeff_token: 4 tables selected by the neighbour nnz context
        for(i=0; i<4; i++){
            init_vlc(&coeff_token_vlc[i], COEFF_TOKEN_VLC_BITS, 4*17,
                     &coeff_token_len [i][0], 1, 1,
                     &coeff_token_bits[i][0], 1, 1);
        }

        for(i=0; i<3; i++){
            init_vlc(&chroma_dc_total_zeros_vlc[i], CHROMA_DC_TOTAL_ZEROS_VLC_BITS, 4,
                     &chroma_dc_total_zeros_len [i][0], 1, 1,
                     &chroma_dc_total_zeros_bits[i][0], 1, 1);
        }
        for(i=0; i<15; i++){
            init_vlc(&total_zeros_vlc[i], TOTAL_ZEROS_VLC_BITS, 16,
                     &total_zeros_len [i][0], 1, 1,
                     &total_zeros_bits[i][0], 1, 1);
        }

        // run_before: 6 tables for zeros_left 1..6, plus one for >6
        for(i=0; i<6; i++){
            init_vlc(&run_vlc[i], RUN_VLC_BITS, 7,
                     &run_len [i][0], 1, 1,
                     &run_bits[i][0], 1, 1);
        }
        init_vlc(&run7_vlc, RUN7_VLC_BITS, 16,
                 &run_len [6][0], 1, 1,
                 &run_bits[6][0], 1, 1);
    }
}
2135
2136 /**
2137 * Sets the intra prediction function pointers.
2138 */
2139 static void init_pred_ptrs(H264Context *h){
2140 // MpegEncContext * const s = &h->s;
2141
2142 h->pred4x4[VERT_PRED ]= pred4x4_vertical_c;
2143 h->pred4x4[HOR_PRED ]= pred4x4_horizontal_c;
2144 h->pred4x4[DC_PRED ]= pred4x4_dc_c;
2145 h->pred4x4[DIAG_DOWN_LEFT_PRED ]= pred4x4_down_left_c;
2146 h->pred4x4[DIAG_DOWN_RIGHT_PRED]= pred4x4_down_right_c;
2147 h->pred4x4[VERT_RIGHT_PRED ]= pred4x4_vertical_right_c;
2148 h->pred4x4[HOR_DOWN_PRED ]= pred4x4_horizontal_down_c;
2149 h->pred4x4[VERT_LEFT_PRED ]= pred4x4_vertical_left_c;
2150 h->pred4x4[HOR_UP_PRED ]= pred4x4_horizontal_up_c;
2151 h->pred4x4[LEFT_DC_PRED ]= pred4x4_left_dc_c;
2152 h->pred4x4[TOP_DC_PRED ]= pred4x4_top_dc_c;
2153 h->pred4x4[DC_128_PRED ]= pred4x4_128_dc_c;
2154
2155 h->pred8x8[DC_PRED8x8 ]= pred8x8_dc_c;
2156 h->pred8x8[VERT_PRED8x8 ]= pred8x8_vertical_c;
2157 h->pred8x8[HOR_PRED8x8 ]= pred8x8_horizontal_c;
2158 h->pred8x8[PLANE_PRED8x8 ]= pred8x8_plane_c;
2159 h->pred8x8[LEFT_DC_PRED8x8]= pred8x8_left_dc_c;
2160 h->pred8x8[TOP_DC_PRED8x8 ]= pred8x8_top_dc_c;
2161 h->pred8x8[DC_128_PRED8x8 ]= pred8x8_128_dc_c;
2162
2163 h->pred16x16[DC_PRED8x8 ]= pred16x16_dc_c;
2164 h->pred16x16[VERT_PRED8x8 ]= pred16x16_vertical_c;
2165 h->pred16x16[HOR_PRED8x8 ]= pred16x16_horizontal_c;
2166 h->pred16x16[PLANE_PRED8x8 ]= pred16x16_plane_c;
2167 h->pred16x16[LEFT_DC_PRED8x8]= pred16x16_left_dc_c;
2168 h->pred16x16[TOP_DC_PRED8x8 ]= pred16x16_top_dc_c;
2169 h->pred16x16[DC_128_PRED8x8 ]= pred16x16_128_dc_c;
2170 }
2171
2172 static void free_tables(H264Context *h){
2173 av_freep(&h->intra4x4_pred_mode);
2174 av_freep(&h->chroma_pred_mode_table);
2175 av_freep(&h->cbp_table);
2176 av_freep(&h->mvd_table[0]);
2177 av_freep(&h->mvd_table[1]);
2178 av_freep(&h->non_zero_count);
2179 av_freep(&h->slice_table_base);
2180 av_freep(&h->top_border);
2181 h->slice_table= NULL;
2182
2183 av_freep(&h->mb2b_xy);
2184 av_freep(&h->mb2b8_xy);
2185 }
2186
/**
 * Allocates the per-context decoding tables.
 * Needs width/height (mb_stride / mb_height must already be set).
 * @return 0 on success, -1 on allocation failure (everything freed again)
 */
static int alloc_tables(H264Context *h){
    MpegEncContext * const s = &h->s;
    const int big_mb_num= s->mb_stride * (s->mb_height+1); // one extra MB row for edge handling
    int x,y;

    CHECKED_ALLOCZ(h->intra4x4_pred_mode, big_mb_num * 8  * sizeof(uint8_t))

    CHECKED_ALLOCZ(h->non_zero_count    , big_mb_num * 16 * sizeof(uint8_t))
    CHECKED_ALLOCZ(h->slice_table_base  , big_mb_num * sizeof(uint8_t))
    CHECKED_ALLOCZ(h->top_border        , s->mb_width * (16+8+8) * sizeof(uint8_t))

    if( h->pps.cabac ) {
        // these tables are only read by the CABAC decoder
        CHECKED_ALLOCZ(h->chroma_pred_mode_table, big_mb_num * sizeof(uint8_t))
        CHECKED_ALLOCZ(h->cbp_table, big_mb_num * sizeof(uint16_t))
        CHECKED_ALLOCZ(h->mvd_table[0], 32*big_mb_num * sizeof(uint16_t));
        CHECKED_ALLOCZ(h->mvd_table[1], 32*big_mb_num * sizeof(uint16_t));
    }

    // 0xFF == "no slice"; offset slice_table so that the left and top
    // neighbour entries of the first macroblock are valid accesses
    memset(h->slice_table_base, -1, big_mb_num * sizeof(uint8_t));
    h->slice_table= h->slice_table_base + s->mb_stride + 1;

    // macroblock index -> motion vector (b) / 8x8 block (b8) index tables
    CHECKED_ALLOCZ(h->mb2b_xy  , big_mb_num * sizeof(uint16_t));
    CHECKED_ALLOCZ(h->mb2b8_xy , big_mb_num * sizeof(uint16_t));
    for(y=0; y<s->mb_height; y++){
        for(x=0; x<s->mb_width; x++){
            const int mb_xy= x + y*s->mb_stride;
            const int b_xy = 4*x + 4*y*h->b_stride;
            const int b8_xy= 2*x + 2*y*h->b8_stride;

            h->mb2b_xy [mb_xy]= b_xy;
            h->mb2b8_xy[mb_xy]= b8_xy;
        }
    }

    return 0;
fail:
    free_tables(h);
    return -1;
}
2230
/**
 * Initialization shared by all entry points: picks up dimensions and codec
 * id from the attached AVCodecContext and sets the intra prediction
 * function pointers.  s->avctx must be set before calling this.
 */
static void common_init(H264Context *h){
    MpegEncContext * const s = &h->s;

    s->width = s->avctx->width;
    s->height = s->avctx->height;
    s->codec_id= s->avctx->codec->id;

    init_pred_ptrs(h);

    s->unrestricted_mv=1;
    s->decode=1; //FIXME
}
2243
/**
 * AVCodec init callback: sets up the MpegEncContext defaults, the H.264
 * specific state and the static CAVLC tables.
 * @return always 0
 */
static int decode_init(AVCodecContext *avctx){
    H264Context *h= avctx->priv_data;
    MpegEncContext * const s = &h->s;

    MPV_decode_defaults(s);

    s->avctx = avctx;       // must precede common_init(), which reads s->avctx
    common_init(h);

    s->out_format = FMT_H264;
    s->workaround_bugs= avctx->workaround_bugs;

    // set defaults
//    s->decode_mb= ff_h263_decode_mb;
    s->low_delay= 1;
    avctx->pix_fmt= PIX_FMT_YUV420P;

    decode_init_vlc(h);

    return 0;
}
2265
/**
 * Called at the start of each frame: begins MPV / error resilience frame
 * handling, resets the MMCO list and precomputes the pixel offsets of each
 * 4x4 block within a macroblock (they depend on the line sizes, which are
 * only known once the frame buffers exist).
 */
static void frame_start(H264Context *h){
    MpegEncContext * const s = &h->s;
    int i;

    MPV_frame_start(s, s->avctx);
    ff_er_frame_start(s);
    h->mmco_index=0;

    assert(s->linesize && s->uvlinesize);

    for(i=0; i<16; i++){
        // offset of luma 4x4 block i (scan8 order) from the MB top-left
        h->block_offset[i]= 4*((scan8[i] - scan8[0])&7) + 4*s->linesize*((scan8[i] - scan8[0])>>3);
        h->chroma_subblock_offset[i]= 2*((scan8[i] - scan8[0])&7) + 2*s->uvlinesize*((scan8[i] - scan8[0])>>3);
    }
    for(i=0; i<4; i++){
        // chroma 4x4 block offsets; cb (16..19) and cr (20..23) share the layout
        h->block_offset[16+i]=
        h->block_offset[20+i]= 4*((scan8[i] - scan8[0])&7) + 4*s->uvlinesize*((scan8[i] - scan8[0])>>3);
    }

//    s->decode= (s->flags&CODEC_FLAG_PSNR) || !s->encoding || s->current_picture.reference /*|| h->contains_intra*/ || 1;
}
2287
/**
 * Saves the bottom row and right column of the current macroblock (before
 * the deblocking filter modifies them — see the call site in hl_decode_mb)
 * into top_border / left_border, so neighbouring macroblocks can later
 * intra predict from the unfiltered samples via xchg_mb_border().
 */
static inline void backup_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize){
    MpegEncContext * const s = &h->s;
    int i;

    // step one line up so index i*linesize addresses row i-1 of the MB
    src_y  -=   linesize;
    src_cb -= uvlinesize;
    src_cr -= uvlinesize;

    // preserve the top-right corner sample before top_border is overwritten
    h->left_border[0]= h->top_border[s->mb_x][15];
    for(i=1; i<17; i++){
        h->left_border[i]= src_y[15+i*  linesize];   // right column, rows 0..15
    }

    // bottom row of this MB becomes the top border of the MB below
    *(uint64_t*)(h->top_border[s->mb_x]+0)= *(uint64_t*)(src_y +  16*linesize);
    *(uint64_t*)(h->top_border[s->mb_x]+8)= *(uint64_t*)(src_y +8+16*linesize);

    if(!(s->flags&CODEC_FLAG_GRAY)){
        h->left_border[17  ]= h->top_border[s->mb_x][16+7];
        h->left_border[17+9]= h->top_border[s->mb_x][24+7];
        for(i=1; i<9; i++){
            h->left_border[i+17  ]= src_cb[7+i*uvlinesize];
            h->left_border[i+17+9]= src_cr[7+i*uvlinesize];
        }
        *(uint64_t*)(h->top_border[s->mb_x]+16)= *(uint64_t*)(src_cb+8*uvlinesize);
        *(uint64_t*)(h->top_border[s->mb_x]+24)= *(uint64_t*)(src_cr+8*uvlinesize);
    }
}
2315
/**
 * Exchanges (xchg=1) or copies back (xchg=0) the border samples saved by
 * backup_mb_border() with the picture content around the current MB, so
 * that intra prediction reads pre-deblocking neighbour samples.  Called
 * with xchg=1 before and xchg=0 after intra prediction (see hl_decode_mb).
 * Some spans pass a constant 1 regardless — NOTE(review): presumably those
 * regions are not touched by the filter, so the extra swap is harmless;
 * confirm against filter_mb.
 */
static inline void xchg_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int xchg){
    MpegEncContext * const s = &h->s;
    int temp8, i;
    uint64_t temp64;

    // step back to the neighbour row/column including the top-left corner
    src_y  -=   linesize + 1;
    src_cb -= uvlinesize + 1;
    src_cr -= uvlinesize + 1;

// with xchg==0 this only copies a into b; with xchg==1 it swaps them
#define XCHG(a,b,t,xchg)\
t= a;\
if(xchg)\
    a= b;\
b= t;

    for(i=0; i<17; i++){
        XCHG(h->left_border[i     ], src_y [i*  linesize], temp8, xchg);
    }

    XCHG(*(uint64_t*)(h->top_border[s->mb_x]+0), *(uint64_t*)(src_y +1), temp64, xchg);
    XCHG(*(uint64_t*)(h->top_border[s->mb_x]+8), *(uint64_t*)(src_y +9), temp64, 1);

    if(!(s->flags&CODEC_FLAG_GRAY)){
        for(i=0; i<9; i++){
            XCHG(h->left_border[i+17  ], src_cb[i*uvlinesize], temp8, xchg);
            XCHG(h->left_border[i+17+9], src_cr[i*uvlinesize], temp8, xchg);
        }
        XCHG(*(uint64_t*)(h->top_border[s->mb_x]+16), *(uint64_t*)(src_cb+1), temp64, 1);
        XCHG(*(uint64_t*)(h->top_border[s->mb_x]+24), *(uint64_t*)(src_cr+1), temp64, 1);
    }
}
2347
2348 static void hl_decode_mb(H264Context *h){
2349 MpegEncContext * const s = &h->s;
2350 const int mb_x= s->mb_x;
2351 const int mb_y= s->mb_y;
2352 const int mb_xy= mb_x + mb_y*s->mb_stride;
2353 const int mb_type= s->current_picture.mb_type[mb_xy];
2354 uint8_t *dest_y, *dest_cb, *dest_cr;
2355 int linesize, uvlinesize /*dct_offset*/;
2356 int i;
2357
2358 if(!s->decode)
2359 return;
2360
2361 if(s->mb_skiped){
2362 }
2363
2364 dest_y = s->current_picture.data[0] + (mb_y * 16* s->linesize ) + mb_x * 16;
2365 dest_cb = s->current_picture.data[1] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
2366 dest_cr = s->current_picture.data[2] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
2367
2368 if (h->mb_field_decoding_flag) {
2369 linesize = s->linesize * 2;
2370 uvlinesize = s->uvlinesize * 2;
2371 if(mb_y&1){ //FIXME move out of this func?
2372 dest_y -= s->linesize*15;
2373 dest_cb-= s->linesize*7;
2374 dest_cr-= s->linesize*7;
2375 }
2376 } else {
2377 linesize = s->linesize;
2378 uvlinesize = s->uvlinesize;
2379 // dct_offset = s->linesize * 16;
2380 }
2381
2382 if(IS_INTRA(mb_type)){
2383 if(h->deblocking_filter)
2384 xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 1);
2385
2386 if(!(s->flags&CODEC_FLAG_GRAY)){
2387 h->pred8x8[ h->chroma_pred_mode ](dest_cb, uvlinesize);
2388 h->pred8x8[ h->chroma_pred_mode ](dest_cr, uvlinesize);
2389 }
2390
2391 if(IS_INTRA4x4(mb_type)){
2392 if(!s->encoding){
2393 for(i=0; i<16; i++){
2394 uint8_t * const ptr= dest_y + h->block_offset[i];
2395 uint8_t *topright= ptr + 4 - linesize;
2396 const int topright_avail= (h->topright_samples_available<<i)&0x8000;
2397 const int dir= h->intra4x4_pred_mode_cache[ scan8[i] ];
2398 int tr;
2399
2400 if(!topright_avail){
2401 tr= ptr[3 - linesize]*0x01010101;
2402 topright= (uint8_t*) &tr;
2403 }else if(i==5 && h->deblocking_filter){
2404 tr= *(uint32_t*)h->top_border[mb_x+1];
2405 topright= (uint8_t*) &tr;
2406 }
2407
2408 h->pred4x4[ dir ](ptr, topright, linesize);
2409 if(h->non_zero_count_cache[ scan8[i] ]){
2410 if(s->codec_id == CODEC_ID_H264)
2411 h264_add_idct_c(ptr, h->mb + i*16, linesize);
2412 else
2413 svq3_add_idct_c(ptr, h->mb + i*16, linesize, s->qscale, 0);
2414 }
2415 }
2416 }
2417 }else{
2418 h->pred16x16[ h->intra16x16_pred_mode ](dest_y , linesize);
2419 if(s->codec_id == CODEC_ID_H264)
2420 h264_luma_dc_dequant_idct_c(h->mb, s->qscale);
2421 else
2422 svq3_luma_dc_dequant_idct_c(h->mb, s->qscale);
2423 }
2424 if(h->deblocking_filter)
2425 xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 0);
2426 }else if(s->codec_id == CODEC_ID_H264){
2427 hl_motion(h, dest_y, dest_cb, dest_cr,
2428 s->dsp.put_h264_qpel_pixels_tab, s->dsp.put_h264_chroma_pixels_tab,
2429 s->dsp.avg_h264_qpel_pixels_tab, s->dsp.avg_h264_chroma_pixels_tab);
2430 }
2431
2432
2433 if(!IS_INTRA4x4(mb_type)){
2434 if(s->codec_id == CODEC_ID_H264){
2435 for(i=0; i<16; i++){
2436 if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){ //FIXME benchmark weird rule, & below
2437 uint8_t * const ptr= dest_y + h->block_offset[i];
2438 h264_add_idct_c(ptr, h->mb + i*16, linesize);
2439 }
2440 }
2441 }else{
2442 for(i=0; i<16; i++){
2443 if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){ //FIXME benchmark weird rule, & below
2444 uint8_t * const ptr= dest_y + h->block_offset[i];
2445 svq3_add_idct_c(ptr, h->mb + i*16, linesize, s->qscale, IS_INTRA(mb_type) ? 1 : 0);
2446 }
2447 }
2448 }
2449 }
2450
2451 if(!(s->flags&CODEC_FLAG_GRAY)){
2452 chroma_dc_dequant_idct_c(h->mb + 16*16, h->chroma_qp);
2453 chroma_dc_dequant_idct_c(h->mb + 16*16+4*16, h->chroma_qp);
2454 if(s->codec_id == CODEC_ID_H264){
2455 for(i=16; i<16+4; i++){
2456 if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){
2457 uint8_t * const ptr= dest_cb + h->block_offset[i];
2458 h264_add_idct_c(ptr, h->mb + i*16, uvlinesize);
2459 }
2460 }
2461 for(i=20; i<20+4; i++){
2462 if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){
2463 uint8_t * const ptr= dest_cr + h->block_offset[i];
2464 h264_add_idct_c(ptr, h->mb + i*16, uvlinesize);
2465 }
2466 }
2467 }else{
2468 for(i=16; i<16+4; i++){
2469 if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){
2470 uint8_t * const ptr= dest_cb + h->block_offset[i];
2471 svq3_add_idct_c(ptr, h->mb + i*16, uvlinesize, chroma_qp[s->qscale + 12] - 12, 2);
2472 }
2473 }
2474 for(i=20; i<20+4; i++){
2475 if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){
2476 uint8_t * const ptr= dest_cr + h->block_offset[i];
2477 svq3_add_idct_c(ptr, h->mb + i*16, uvlinesize, chroma_qp[s->qscale + 12] - 12, 2);
2478 }
2479 }
2480 }
2481 }
2482 if(h->deblocking_filter) {
2483 backup_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize);
2484 filter_mb(h, mb_x, mb_y, dest_y, dest_cb, dest_cr);
2485 }
2486 }
2487
2488 /**
2489 * fills the default_ref_list.
2490 */
2491 static int fill_default_ref_list(H264Context *h){
2492 MpegEncContext * const s = &h->s;
2493 int i;
2494 Picture sorted_short_ref[16];
2495
2496 if(h->slice_type==B_TYPE){
2497 int out_i;
2498 int limit= -1;
2499
2500 for(out_i=0; out_i<h->short_ref_count; out_i++){
2501 int best_i=-1;
2502 int best_poc=-1;
2503
2504 for(i=0; i<h->short_ref_count; i++){
2505 const int poc= h->short_ref[i]->poc;
2506 if(poc > limit && poc < best_poc){
2507 best_poc= poc;
2508 best_i= i;
2509 }
2510 }
2511
2512 assert(best_i != -1);
2513
2514 limit= best_poc;
2515 sorted_short_ref[out_i]= *h->short_ref[best_i];
2516 }
2517 }
2518
2519 if(s->picture_structure == PICT_FRAME){
2520 if(h->slice_type==B_TYPE){
2521 const int current_poc= s->current_picture_ptr->poc;
2522 int list;
2523
2524 for(list=0; list<2; list++){
2525 int index=0;
2526
2527 for(i=0; i<h->short_ref_count && index < h->ref_count[list]; i++){
2528 const int i2= list ? h->short_ref_count - i - 1 : i;
2529 const int poc= sorted_short_ref[i2].poc;
2530
2531 if(sorted_short_ref[i2].reference != 3) continue; //FIXME refernce field shit
2532
2533 if((list==1 && poc > current_poc) || (list==0 && poc < current_poc)){
2534 h->default_ref_list[list][index ]= sorted_short_ref[i2];
2535 h->default_ref_list[list][index++].pic_id= sorted_short_ref[i2].frame_num;
2536 }
2537 }
2538
2539 for(i=0; i<h->long_ref_count && index < h->ref_count[ list ]; i++){
2540 if(h->long_ref[i]->reference != 3) continue;
2541
2542 h->default_ref_list[ list ][index ]= *h->long_ref[i];
2543 h->default_ref_list[ list ][index++].pic_id= i;;
2544 }
2545
2546 if(h->long_ref_count > 1 && h->short_ref_count==0){
2547 Picture temp= h->default_ref_list[1][0];
2548 h->default_ref_list[1][0] = h->default_ref_list[1][1];
2549 h->default_ref_list[1][0] = temp;
2550 }
2551
2552 if(index < h->ref_count[ list ])
2553 memset(&h->default_ref_list[list][index], 0, sizeof(Picture)*(h->ref_count[ list ] - index));
2554 }
2555 }else{
2556 int index=0;
2557 for(i=0; i<h->short_ref_count && index < h->ref_count[0]; i++){
2558 if(h->short_ref[i]->reference != 3) continue; //FIXME refernce field shit
2559 h->default_ref_list[0][index ]= *h->short_ref[i];
2560 h->default_ref_list[0][index++].pic_id= h->short_ref[i]->frame_num;
2561 }
2562 for(i=0; i<h->long_ref_count && index < h->ref_count[0]; i++){
2563 if(h->long_ref[i]->reference != 3) continue;
2564 h->default_ref_list[0][index ]= *h->long_ref[i];
2565 h->default_ref_list[0][index++].pic_id= i;;
2566 }
2567 if(index < h->ref_count[0])
2568 memset(&h->default_ref_list[0][index], 0, sizeof(Picture)*(h->ref_count[0] - index));
2569 }
2570 }else{ //FIELD
2571 if(h->slice_type==B_TYPE){
2572 }else{
2573 //FIXME second field balh
2574 }
2575 }
2576 return 0;
2577 }
2578
/**
 * Decodes the ref_pic_list_reordering syntax element and applies it to
 * ref_list, which is first initialized from default_ref_list.
 *
 * @return 0 on success, -1 on invalid bitstream data
 */
static int decode_ref_pic_list_reordering(H264Context *h){
    MpegEncContext * const s = &h->s;
    int list;

    if(h->slice_type==I_TYPE || h->slice_type==SI_TYPE) return 0; //FIXME move before func

    for(list=0; list<2; list++){
        memcpy(h->ref_list[list], h->default_ref_list[list], sizeof(Picture)*h->ref_count[list]);

        if(get_bits1(&s->gb)){   // ref_pic_list_reordering_flag
            int pred= h->curr_pic_num;
            int index;

            for(index=0; ; index++){
                int reordering_of_pic_nums_idc= get_ue_golomb(&s->gb);
                int pic_id;
                int i;


                if(index >= h->ref_count[list]){
                    av_log(h->s.avctx, AV_LOG_ERROR, "reference count overflow\n");
                    return -1;
                }

                if(reordering_of_pic_nums_idc<3){
                    if(reordering_of_pic_nums_idc<2){
                        // idc 0/1: short term picture, signalled as a -/+ delta
                        // to the running prediction (modulo max_pic_num)
                        const int abs_diff_pic_num= get_ue_golomb(&s->gb) + 1;

                        if(abs_diff_pic_num >= h->max_pic_num){
                            av_log(h->s.avctx, AV_LOG_ERROR, "abs_diff_pic_num overflow\n");
                            return -1;
                        }

                        if(reordering_of_pic_nums_idc == 0) pred-= abs_diff_pic_num;
                        else                                pred+= abs_diff_pic_num;
                        pred &= h->max_pic_num - 1;

                        // locate the matching short term picture at or after index
                        for(i= h->ref_count[list]-1; i>=index; i--){
                            if(h->ref_list[list][i].pic_id == pred && h->ref_list[list][i].long_ref==0)
                                break;
                        }
                    }else{
                        // idc 2: long term picture, signalled directly
                        pic_id= get_ue_golomb(&s->gb); //long_term_pic_idx

                        for(i= h->ref_count[list]-1; i>=index; i--){
                            if(h->ref_list[list][i].pic_id == pic_id && h->ref_list[list][i].long_ref==1)
                                break;
                        }
                    }

                    if(i < index){
                        av_log(h->s.avctx, AV_LOG_ERROR, "reference picture missing during reorder\n");
                        memset(&h->ref_list[list][index], 0, sizeof(Picture)); //FIXME
                    }else if(i > index){
                        // move the found picture to position index, shifting
                        // the intervening entries back by one
                        Picture tmp= h->ref_list[list][i];
                        for(; i>index; i--){
                            h->ref_list[list][i]= h->ref_list[list][i-1];
                        }
                        h->ref_list[list][index]= tmp;
                    }
                }else if(reordering_of_pic_nums_idc==3)
                    break;       // end of reordering commands
                else{
                    av_log(h->s.avctx, AV_LOG_ERROR, "illegal reordering_of_pic_nums_idc\n");
                    return -1;
                }
            }
        }

        if(h->slice_type!=B_TYPE) break;   // only B slices have a second list
    }
    return 0;
}
2652
2653 static int pred_weight_table(H264Context *h){
2654 MpegEncContext * const s = &h->s;
2655 int list, i;
2656
2657 h->luma_log2_weight_denom= get_ue_golomb(&s->gb);
2658 h->chroma_log2_weight_denom= get_ue_golomb(&s->gb);
2659
2660 for(list=0; list<2; list++){
2661 for(i=0; i<h->ref_count[list]; i++){
2662 int luma_weight_flag, chroma_weight_flag;
2663
2664 luma_weight_flag= get_bits1(&s->gb);
2665 if(luma_weight_flag){
2666 h->luma_weight[list][i]= get_se_golomb(&s->gb);
2667 h->luma_offset[list][i]= get_se_golomb(&s->gb);
2668 }
2669
2670 chroma_weight_flag= get_bits1(&s->gb);
2671 if(chroma_weight_flag){
2672 int j;
2673 for(j=0; j<2; j++){
2674 h->chroma_weight[list][i][j]= get_se_golomb(&s->gb);
2675 h->chroma_offset[list][i][j]= get_se_golomb(&s->gb);
2676 }
2677 }
2678 }
2679 if(h->slice_ty