cleanup macroblock layer: merged decode of skipped MBs
libavcodec/cavs.c
1 /*
2 * Chinese AVS video (AVS1-P2, JiZhun profile) decoder.
3 * Copyright (c) 2006 Stefan Gehrer <stefan.gehrer@gmx.de>
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20 /**
21 * @file cavs.c
22 * Chinese AVS video (AVS1-P2, JiZhun profile) decoder
23 * @author Stefan Gehrer <stefan.gehrer@gmx.de>
24 */
25
26 #include "avcodec.h"
27 #include "bitstream.h"
28 #include "golomb.h"
29 #include "mpegvideo.h"
30 #include "cavsdata.h"
31
32 typedef struct {
33 MpegEncContext s;
34 Picture picture; ///< currently decoded frame
35 Picture DPB[2]; ///< reference frames
36 int dist[2]; ///< temporal distances from current frame to ref frames
37 int profile, level;
38 int aspect_ratio;
39 int mb_width, mb_height;
40 int pic_type;
41 int progressive;
42 int pic_structure;
43 int skip_mode_flag; ///< select between skip_count or one skip_flag per MB
44 int loop_filter_disable;
45 int alpha_offset, beta_offset;
46 int ref_flag;
47 int mbx, mby; ///< macroblock coordinates
48 int flags; ///< availability flags of neighbouring macroblocks
49 int stc; ///< last start code
50 uint8_t *cy, *cu, *cv; ///< current MB sample pointers
51 int left_qp;
52 uint8_t *top_qp;
53
54 /** mv motion vector cache
55 0: D3 B2 B3 C2
56 4: A1 X0 X1 -
57 8: A3 X2 X3 -
58
59 X are the vectors in the current macroblock (5,6,9,10)
60 A is the macroblock to the left (4,8)
61 B is the macroblock to the top (1,2)
62 C is the macroblock to the top-right (3)
63 D is the macroblock to the top-left (0)
64
65 the same is repeated for backward motion vectors */
66 vector_t mv[2*4*3];
67 vector_t *top_mv[2];
68 vector_t *col_mv;
69
70 /** luma pred mode cache
71 0: -- B2 B3
72 3: A1 X0 X1
73 6: A3 X2 X3 */
74 int pred_mode_Y[3*3];
75 int *top_pred_Y;
76 int l_stride, c_stride;
77 int luma_scan[4];
78 int qp;
79 int qp_fixed;
80 int cbp;
81
 82     /** intra prediction is done with un-deblocked samples;
 83         they are saved here before the MB is deblocked */
84 uint8_t *top_border_y, *top_border_u, *top_border_v;
85 uint8_t left_border_y[16], left_border_u[10], left_border_v[10];
86 uint8_t topleft_border_y, topleft_border_u, topleft_border_v;
87
88 void (*intra_pred_l[8])(uint8_t *d,uint8_t *top,uint8_t *left,int stride);
89 void (*intra_pred_c[7])(uint8_t *d,uint8_t *top,uint8_t *left,int stride);
90 uint8_t *col_type_base;
91 uint8_t *col_type;
92
93 /* scaling factors for MV prediction */
94 int sym_factor; ///< for scaling in symmetrical B block
95 int direct_den[2]; ///< for scaling in direct B block
96 int scale_den[2]; ///< for scaling neighbouring MVs
97
98 int got_keyframe;
99 } AVSContext;
100
101 /*****************************************************************************
102 *
103 * in-loop deblocking filter
104 *
105 ****************************************************************************/
106
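/* boundary strength for the loop filter: 2 if either side is intra coded;
 * 1 if the two sides use different references (P) or any forward/backward MV
 * component differs by at least 4 quarter-pel units (one luma pel);
 * 0 otherwise (no filtering) */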
107 static inline int get_bs_p(vector_t *mvP, vector_t *mvQ) {
108 if((mvP->ref == REF_INTRA) || (mvQ->ref == REF_INTRA))
109 return 2;
110 if(mvP->ref != mvQ->ref)
111 return 1;
112 if( (abs(mvP->x - mvQ->x) >= 4) || (abs(mvP->y - mvQ->y) >= 4) )
113 return 1;
114 return 0;
115 }
116
117 static inline int get_bs_b(vector_t *mvP, vector_t *mvQ) {
118 if((mvP->ref == REF_INTRA) || (mvQ->ref == REF_INTRA)) {
119 return 2;
120 } else {
121 vector_t *mvPbw = mvP + MV_BWD_OFFS;
122 vector_t *mvQbw = mvQ + MV_BWD_OFFS;
123 if( (abs( mvP->x - mvQ->x) >= 4) ||
124 (abs( mvP->y - mvQ->y) >= 4) ||
125 (abs(mvPbw->x - mvQbw->x) >= 4) ||
126 (abs(mvPbw->y - mvQbw->y) >= 4) )
127 return 1;
128 }
129 return 0;
130 }
131
132 #define SET_PARAMS \
133 alpha = alpha_tab[clip(qp_avg + h->alpha_offset,0,63)]; \
134 beta = beta_tab[clip(qp_avg + h->beta_offset, 0,63)]; \
135 tc = tc_tab[clip(qp_avg + h->alpha_offset,0,63)];
136
137 /**
138 * in-loop deblocking filter for a single macroblock
139 *
140 * boundary strength (bs) mapping:
141 *
142 * --4---5--
143 * 0 2 |
144 * | 6 | 7 |
145 * 1 3 |
146 * ---------
147 *
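 * edge index meaning: 0,1 = left MB border (vertical), 2,3 = internal
 * vertical edge, 4,5 = top MB border (horizontal), 6,7 = internal
 * horizontal edge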
148 */
149 static void filter_mb(AVSContext *h, enum mb_t mb_type) {
150 DECLARE_ALIGNED_8(uint8_t, bs[8]);
151 int qp_avg, alpha, beta, tc;
152 int i;
153
154 /* save un-deblocked lines */
155 h->topleft_border_y = h->top_border_y[h->mbx*16+15];
156 h->topleft_border_u = h->top_border_u[h->mbx*10+8];
157 h->topleft_border_v = h->top_border_v[h->mbx*10+8];
158 memcpy(&h->top_border_y[h->mbx*16], h->cy + 15* h->l_stride,16);
159 memcpy(&h->top_border_u[h->mbx*10+1], h->cu + 7* h->c_stride,8);
160 memcpy(&h->top_border_v[h->mbx*10+1], h->cv + 7* h->c_stride,8);
161 for(i=0;i<8;i++) {
162 h->left_border_y[i*2+0] = *(h->cy + 15 + (i*2+0)*h->l_stride);
163 h->left_border_y[i*2+1] = *(h->cy + 15 + (i*2+1)*h->l_stride);
164 h->left_border_u[i+1] = *(h->cu + 7 + i*h->c_stride);
165 h->left_border_v[i+1] = *(h->cv + 7 + i*h->c_stride);
166 }
167 if(!h->loop_filter_disable) {
168 /* clear bs */
169 *((uint64_t *)bs) = 0;
170 /* determine bs */
171 switch(mb_type) {
172 case I_8X8:
173 *((uint64_t *)bs) = 0x0202020202020202ULL;
174 break;
175 case P_8X8:
176 case P_8X16:
177 bs[2] = get_bs_p(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X1]);
178 bs[3] = get_bs_p(&h->mv[MV_FWD_X2], &h->mv[MV_FWD_X3]);
179 case P_16X8:
180 bs[6] = get_bs_p(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X2]);
181 bs[7] = get_bs_p(&h->mv[MV_FWD_X1], &h->mv[MV_FWD_X3]);
182 case P_16X16:
183 case P_SKIP:
184 bs[0] = get_bs_p(&h->mv[MV_FWD_A1], &h->mv[MV_FWD_X0]);
185 bs[1] = get_bs_p(&h->mv[MV_FWD_A3], &h->mv[MV_FWD_X2]);
186 bs[4] = get_bs_p(&h->mv[MV_FWD_B2], &h->mv[MV_FWD_X0]);
187 bs[5] = get_bs_p(&h->mv[MV_FWD_B3], &h->mv[MV_FWD_X1]);
188 break;
189 case B_SKIP:
190 case B_DIRECT:
191 case B_8X8:
192 bs[2] = get_bs_b(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X1]);
193 bs[3] = get_bs_b(&h->mv[MV_FWD_X2], &h->mv[MV_FWD_X3]);
194 bs[6] = get_bs_b(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X2]);
195 bs[7] = get_bs_b(&h->mv[MV_FWD_X1], &h->mv[MV_FWD_X3]);
196 case B_FWD_16X16:
197 case B_BWD_16X16:
198 case B_SYM_16X16:
199 bs[0] = get_bs_b(&h->mv[MV_FWD_A1], &h->mv[MV_FWD_X0]);
200 bs[1] = get_bs_b(&h->mv[MV_FWD_A3], &h->mv[MV_FWD_X2]);
201 bs[4] = get_bs_b(&h->mv[MV_FWD_B2], &h->mv[MV_FWD_X0]);
202 bs[5] = get_bs_b(&h->mv[MV_FWD_B3], &h->mv[MV_FWD_X1]);
203 break;
204 default:
205 if(mb_type & 1) { //16X8
206 bs[6] = bs[7] = get_bs_b(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X2]);
207 } else { //8X16
208 bs[2] = bs[3] = get_bs_b(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X1]);
209 }
210 bs[0] = get_bs_b(&h->mv[MV_FWD_A1], &h->mv[MV_FWD_X0]);
211 bs[1] = get_bs_b(&h->mv[MV_FWD_A3], &h->mv[MV_FWD_X2]);
212 bs[4] = get_bs_b(&h->mv[MV_FWD_B2], &h->mv[MV_FWD_X0]);
213 bs[5] = get_bs_b(&h->mv[MV_FWD_B3], &h->mv[MV_FWD_X1]);
214 }
215 if( *((uint64_t *)bs) ) {
216 if(h->flags & A_AVAIL) {
217 qp_avg = (h->qp + h->left_qp + 1) >> 1;
218 SET_PARAMS;
219 h->s.dsp.cavs_filter_lv(h->cy,h->l_stride,alpha,beta,tc,bs[0],bs[1]);
220 h->s.dsp.cavs_filter_cv(h->cu,h->c_stride,alpha,beta,tc,bs[0],bs[1]);
221 h->s.dsp.cavs_filter_cv(h->cv,h->c_stride,alpha,beta,tc,bs[0],bs[1]);
222 }
223 qp_avg = h->qp;
224 SET_PARAMS;
225 h->s.dsp.cavs_filter_lv(h->cy + 8,h->l_stride,alpha,beta,tc,bs[2],bs[3]);
226 h->s.dsp.cavs_filter_lh(h->cy + 8*h->l_stride,h->l_stride,alpha,beta,tc,
227 bs[6],bs[7]);
228
229 if(h->flags & B_AVAIL) {
230 qp_avg = (h->qp + h->top_qp[h->mbx] + 1) >> 1;
231 SET_PARAMS;
232 h->s.dsp.cavs_filter_lh(h->cy,h->l_stride,alpha,beta,tc,bs[4],bs[5]);
233 h->s.dsp.cavs_filter_ch(h->cu,h->c_stride,alpha,beta,tc,bs[4],bs[5]);
234 h->s.dsp.cavs_filter_ch(h->cv,h->c_stride,alpha,beta,tc,bs[4],bs[5]);
235 }
236 }
237 }
238 h->left_qp = h->qp;
239 h->top_qp[h->mbx] = h->qp;
240 }
241
242 #undef SET_PARAMS
243
244 /*****************************************************************************
245 *
246 * spatial intra prediction
247 *
248 ****************************************************************************/
249
250 static inline void load_intra_pred_luma(AVSContext *h, uint8_t *top,
251 uint8_t *left, int block) {
252 int i;
253
254 switch(block) {
255 case 0:
256 memcpy(&left[1],h->left_border_y,16);
257 left[0] = left[1];
258 left[17] = left[16];
259 memcpy(&top[1],&h->top_border_y[h->mbx*16],16);
260 top[17] = top[16];
261 top[0] = top[1];
262 if((h->flags & A_AVAIL) && (h->flags & B_AVAIL))
263 left[0] = top[0] = h->topleft_border_y;
264 break;
265 case 1:
266 for(i=0;i<8;i++)
267 left[i+1] = *(h->cy + 7 + i*h->l_stride);
268 memset(&left[9],left[8],9);
269 left[0] = left[1];
270 memcpy(&top[1],&h->top_border_y[h->mbx*16+8],8);
271 if(h->flags & C_AVAIL)
272 memcpy(&top[9],&h->top_border_y[(h->mbx + 1)*16],8);
273 else
274 memset(&top[9],top[8],9);
275 top[17] = top[16];
276 top[0] = top[1];
277 if(h->flags & B_AVAIL)
278 left[0] = top[0] = h->top_border_y[h->mbx*16+7];
279 break;
280 case 2:
281 memcpy(&left[1],&h->left_border_y[8],8);
282 memset(&left[9],left[8],9);
283 memcpy(&top[1],h->cy + 7*h->l_stride,16);
284 top[17] = top[16];
285 left[0] = h->left_border_y[7];
286 top[0] = top[1];
287 if(h->flags & A_AVAIL)
288 top[0] = left[0];
289 break;
290 case 3:
291 for(i=0;i<9;i++)
292 left[i] = *(h->cy + 7 + (i+7)*h->l_stride);
293 memset(&left[9],left[8],9);
294 memcpy(&top[0],h->cy + 7 + 7*h->l_stride,9);
295 memset(&top[9],top[8],9);
296 break;
297 }
298 }
299
300 static void intra_pred_vert(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
301 int y;
302 uint64_t a = *((uint64_t *)(&top[1]));
303 for(y=0;y<8;y++) {
304 *((uint64_t *)(d+y*stride)) = a;
305 }
306 }
307
308 static void intra_pred_horiz(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
309 int y;
310 uint64_t a;
311 for(y=0;y<8;y++) {
312 a = left[y+1] * 0x0101010101010101ULL;
313 *((uint64_t *)(d+y*stride)) = a;
314 }
315 }
316
317 static void intra_pred_dc_128(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
318 int y;
319 uint64_t a = 0x8080808080808080ULL;
320 for(y=0;y<8;y++)
321 *((uint64_t *)(d+y*stride)) = a;
322 }
323
324 static void intra_pred_plane(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
325 int x,y,ia;
326 int ih = 0;
327 int iv = 0;
328 uint8_t *cm = cropTbl + MAX_NEG_CROP;
329
330 for(x=0; x<4; x++) {
331 ih += (x+1)*(top[5+x]-top[3-x]);
332 iv += (x+1)*(left[5+x]-left[3-x]);
333 }
334 ia = (top[8]+left[8])<<4;
335 ih = (17*ih+16)>>5;
336 iv = (17*iv+16)>>5;
337 for(y=0; y<8; y++)
338 for(x=0; x<8; x++)
339 d[y*stride+x] = cm[(ia+(x-3)*ih+(y-3)*iv+16)>>5];
340 }
341
342 #define LOWPASS(ARRAY,INDEX) \
343 (( ARRAY[(INDEX)-1] + 2*ARRAY[(INDEX)] + ARRAY[(INDEX)+1] + 2) >> 2)
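/* 3-tap (1,2,1)/4 low-pass filter over the reference samples, used by the
 * diagonal and "low-pass" intra prediction modes below */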
344
345 static void intra_pred_lp(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
346 int x,y;
347 for(y=0; y<8; y++)
348 for(x=0; x<8; x++)
349 d[y*stride+x] = (LOWPASS(top,x+1) + LOWPASS(left,y+1)) >> 1;
350 }
351
352 static void intra_pred_down_left(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
353 int x,y;
354 for(y=0; y<8; y++)
355 for(x=0; x<8; x++)
356 d[y*stride+x] = (LOWPASS(top,x+y+2) + LOWPASS(left,x+y+2)) >> 1;
357 }
358
359 static void intra_pred_down_right(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
360 int x,y;
361 for(y=0; y<8; y++)
362 for(x=0; x<8; x++)
363 if(x==y)
364 d[y*stride+x] = (left[1]+2*top[0]+top[1]+2)>>2;
365 else if(x>y)
366 d[y*stride+x] = LOWPASS(top,x-y);
367 else
368 d[y*stride+x] = LOWPASS(left,y-x);
369 }
370
371 static void intra_pred_lp_left(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
372 int x,y;
373 for(y=0; y<8; y++)
374 for(x=0; x<8; x++)
375 d[y*stride+x] = LOWPASS(left,y+1);
376 }
377
378 static void intra_pred_lp_top(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
379 int x,y;
380 for(y=0; y<8; y++)
381 for(x=0; x<8; x++)
382 d[y*stride+x] = LOWPASS(top,x+1);
383 }
384
385 #undef LOWPASS
386
387 static inline void modify_pred(const int_fast8_t *mod_table, int *mode) {
388 int newmode = mod_table[*mode];
389 if(newmode < 0) {
390 av_log(NULL, AV_LOG_ERROR, "Illegal intra prediction mode\n");
391 *mode = 0;
392 } else {
393 *mode = newmode;
394 }
395 }
396
397 /*****************************************************************************
398 *
399 * motion compensation
400 *
401 ****************************************************************************/
402
403 static inline void mc_dir_part(AVSContext *h,Picture *pic,int square,
404 int chroma_height,int delta,int list,uint8_t *dest_y,
405 uint8_t *dest_cb,uint8_t *dest_cr,int src_x_offset,
406 int src_y_offset,qpel_mc_func *qpix_op,
407 h264_chroma_mc_func chroma_op,vector_t *mv){
408 MpegEncContext * const s = &h->s;
409 const int mx= mv->x + src_x_offset*8;
410 const int my= mv->y + src_y_offset*8;
411 const int luma_xy= (mx&3) + ((my&3)<<2);
412 uint8_t * src_y = pic->data[0] + (mx>>2) + (my>>2)*h->l_stride;
413 uint8_t * src_cb= pic->data[1] + (mx>>3) + (my>>3)*h->c_stride;
414 uint8_t * src_cr= pic->data[2] + (mx>>3) + (my>>3)*h->c_stride;
415 int extra_width= 0; //(s->flags&CODEC_FLAG_EMU_EDGE) ? 0 : 16;
416 int extra_height= extra_width;
417 int emu=0;
418 const int full_mx= mx>>2;
419 const int full_my= my>>2;
420 const int pic_width = 16*h->mb_width;
421 const int pic_height = 16*h->mb_height;
422
423 if(!pic->data[0])
424 return;
425 if(mx&7) extra_width -= 3;
426 if(my&7) extra_height -= 3;
427
428 if( full_mx < 0-extra_width
429 || full_my < 0-extra_height
430 || full_mx + 16/*FIXME*/ > pic_width + extra_width
431 || full_my + 16/*FIXME*/ > pic_height + extra_height){
432 ff_emulated_edge_mc(s->edge_emu_buffer, src_y - 2 - 2*h->l_stride, h->l_stride,
433 16+5, 16+5/*FIXME*/, full_mx-2, full_my-2, pic_width, pic_height);
434 src_y= s->edge_emu_buffer + 2 + 2*h->l_stride;
435 emu=1;
436 }
437
438 qpix_op[luma_xy](dest_y, src_y, h->l_stride); //FIXME try variable height perhaps?
439 if(!square){
440 qpix_op[luma_xy](dest_y + delta, src_y + delta, h->l_stride);
441 }
442
443 if(emu){
444 ff_emulated_edge_mc(s->edge_emu_buffer, src_cb, h->c_stride,
445 9, 9/*FIXME*/, (mx>>3), (my>>3), pic_width>>1, pic_height>>1);
446 src_cb= s->edge_emu_buffer;
447 }
448 chroma_op(dest_cb, src_cb, h->c_stride, chroma_height, mx&7, my&7);
449
450 if(emu){
451 ff_emulated_edge_mc(s->edge_emu_buffer, src_cr, h->c_stride,
452 9, 9/*FIXME*/, (mx>>3), (my>>3), pic_width>>1, pic_height>>1);
453 src_cr= s->edge_emu_buffer;
454 }
455 chroma_op(dest_cr, src_cr, h->c_stride, chroma_height, mx&7, my&7);
456 }
457
458 static inline void mc_part_std(AVSContext *h,int square,int chroma_height,int delta,
459 uint8_t *dest_y,uint8_t *dest_cb,uint8_t *dest_cr,
460 int x_offset, int y_offset,qpel_mc_func *qpix_put,
461 h264_chroma_mc_func chroma_put,qpel_mc_func *qpix_avg,
462 h264_chroma_mc_func chroma_avg, vector_t *mv){
463 qpel_mc_func *qpix_op= qpix_put;
464 h264_chroma_mc_func chroma_op= chroma_put;
465
466 dest_y += 2*x_offset + 2*y_offset*h->l_stride;
467 dest_cb += x_offset + y_offset*h->c_stride;
468 dest_cr += x_offset + y_offset*h->c_stride;
469 x_offset += 8*h->mbx;
470 y_offset += 8*h->mby;
471
472 if(mv->ref >= 0){
473 Picture *ref= &h->DPB[mv->ref];
474 mc_dir_part(h, ref, square, chroma_height, delta, 0,
475 dest_y, dest_cb, dest_cr, x_offset, y_offset,
476 qpix_op, chroma_op, mv);
477
478 qpix_op= qpix_avg;
479 chroma_op= chroma_avg;
480 }
481
482 if((mv+MV_BWD_OFFS)->ref >= 0){
483 Picture *ref= &h->DPB[0];
484 mc_dir_part(h, ref, square, chroma_height, delta, 1,
485 dest_y, dest_cb, dest_cr, x_offset, y_offset,
486 qpix_op, chroma_op, mv+MV_BWD_OFFS);
487 }
488 }
489
490 static void inter_pred(AVSContext *h) {
491 /* always do 8x8 blocks TODO: are larger blocks worth it? */
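    /* note: index [1] of the qpel/chroma function tables should select the
     * 8x8 luma / 4x4 chroma variants, which is why chroma_height is 4 here */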
492 mc_part_std(h, 1, 4, 0, h->cy, h->cu, h->cv, 0, 0,
493 h->s.dsp.put_cavs_qpel_pixels_tab[1],
494 h->s.dsp.put_h264_chroma_pixels_tab[1],
495 h->s.dsp.avg_cavs_qpel_pixels_tab[1],
496 h->s.dsp.avg_h264_chroma_pixels_tab[1],&h->mv[MV_FWD_X0]);
497 mc_part_std(h, 1, 4, 0, h->cy, h->cu, h->cv, 4, 0,
498 h->s.dsp.put_cavs_qpel_pixels_tab[1],
499 h->s.dsp.put_h264_chroma_pixels_tab[1],
500 h->s.dsp.avg_cavs_qpel_pixels_tab[1],
501 h->s.dsp.avg_h264_chroma_pixels_tab[1],&h->mv[MV_FWD_X1]);
502 mc_part_std(h, 1, 4, 0, h->cy, h->cu, h->cv, 0, 4,
503 h->s.dsp.put_cavs_qpel_pixels_tab[1],
504 h->s.dsp.put_h264_chroma_pixels_tab[1],
505 h->s.dsp.avg_cavs_qpel_pixels_tab[1],
506 h->s.dsp.avg_h264_chroma_pixels_tab[1],&h->mv[MV_FWD_X2]);
507 mc_part_std(h, 1, 4, 0, h->cy, h->cu, h->cv, 4, 4,
508 h->s.dsp.put_cavs_qpel_pixels_tab[1],
509 h->s.dsp.put_h264_chroma_pixels_tab[1],
510 h->s.dsp.avg_cavs_qpel_pixels_tab[1],
511 h->s.dsp.avg_h264_chroma_pixels_tab[1],&h->mv[MV_FWD_X3]);
512 /* set intra prediction modes to default values */
513 h->pred_mode_Y[3] = h->pred_mode_Y[6] = INTRA_L_LP;
514 h->top_pred_Y[h->mbx*2+0] = h->top_pred_Y[h->mbx*2+1] = INTRA_L_LP;
515 }
516
517 /*****************************************************************************
518 *
519 * motion vector prediction
520 *
521 ****************************************************************************/
522
523 static inline void set_mvs(vector_t *mv, enum block_t size) {
524 switch(size) {
525 case BLK_16X16:
526 mv[MV_STRIDE ] = mv[0];
527 mv[MV_STRIDE+1] = mv[0];
528 case BLK_16X8:
529 mv[1] = mv[0];
530 break;
531 case BLK_8X16:
532 mv[MV_STRIDE] = mv[0];
533 break;
534 }
535 }
536
537 static inline void store_mvs(AVSContext *h) {
538 h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + 0] = h->mv[MV_FWD_X0];
539 h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + 1] = h->mv[MV_FWD_X1];
540 h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + 2] = h->mv[MV_FWD_X2];
541 h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + 3] = h->mv[MV_FWD_X3];
542 }
543
544 static inline void scale_mv(AVSContext *h, int *d_x, int *d_y, vector_t *src, int distp) {
545 int den = h->scale_den[src->ref];
546
547 *d_x = (src->x*distp*den + 256 + (src->x>>31)) >> 9;
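    /* scale_den[ref] is 512/dist[ref] (set up in decode_pic), so this computes
     * roughly src * distp / dist[ref]; the +256 and the (src>>31) term handle
     * rounding so that negative components are treated symmetrically */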
548 *d_y = (src->y*distp*den + 256 + (src->y>>31)) >> 9;
549 }
550
551 static inline void mv_pred_median(AVSContext *h, vector_t *mvP, vector_t *mvA, vector_t *mvB, vector_t *mvC) {
552 int ax, ay, bx, by, cx, cy;
553 int len_ab, len_bc, len_ca, len_mid;
554
555 /* scale candidates according to their temporal span */
556 scale_mv(h, &ax, &ay, mvA, mvP->dist);
557 scale_mv(h, &bx, &by, mvB, mvP->dist);
558 scale_mv(h, &cx, &cy, mvC, mvP->dist);
559 /* find the geometrical median of the three candidates */
560 len_ab = abs(ax - bx) + abs(ay - by);
561 len_bc = abs(bx - cx) + abs(by - cy);
562 len_ca = abs(cx - ax) + abs(cy - ay);
563 len_mid = mid_pred(len_ab, len_bc, len_ca);
564 if(len_mid == len_ab) {
565 mvP->x = cx;
566 mvP->y = cy;
567 } else if(len_mid == len_bc) {
568 mvP->x = ax;
569 mvP->y = ay;
570 } else {
571 mvP->x = bx;
572 mvP->y = by;
573 }
574 }
575
576 static inline void mv_pred_direct(AVSContext *h, vector_t *pmv_fw,
577 vector_t *col_mv) {
578 vector_t *pmv_bw = pmv_fw + MV_BWD_OFFS;
579 int den = h->direct_den[col_mv->ref];
580 int m = col_mv->x >> 31;
581
582 pmv_fw->dist = h->dist[1];
583 pmv_bw->dist = h->dist[0];
584 pmv_fw->ref = 1;
585 pmv_bw->ref = 0;
586 /* scale the co-located motion vector according to its temporal span */
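    /* direct_den[ref] is 16384 divided by the temporal distance of the
     * co-located MV's reference (see decode_pic); m is the sign mask of the
     * component, and the ((...)^m)-m pattern scales negative components
     * symmetrically, giving roughly col_mv * dist / col_dist for the forward
     * MV and the negated counterpart for the backward MV */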
587 pmv_fw->x = (((den+(den*col_mv->x*pmv_fw->dist^m)-m-1)>>14)^m)-m;
588 pmv_bw->x = m-(((den+(den*col_mv->x*pmv_bw->dist^m)-m-1)>>14)^m);
589 m = col_mv->y >> 31;
590 pmv_fw->y = (((den+(den*col_mv->y*pmv_fw->dist^m)-m-1)>>14)^m)-m;
591 pmv_bw->y = m-(((den+(den*col_mv->y*pmv_bw->dist^m)-m-1)>>14)^m);
592 }
593
594 static inline void mv_pred_sym(AVSContext *h, vector_t *src, enum block_t size) {
595 vector_t *dst = src + MV_BWD_OFFS;
596
597 /* backward mv is the scaled and negated forward mv */
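    /* sym_factor is dist[0]*scale_den[1] = 512*dist[0]/dist[1] (see decode_pic),
     * so the backward MV is approximately -src * dist[0] / dist[1] with rounding */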
598 dst->x = -((src->x * h->sym_factor + 256) >> 9);
599 dst->y = -((src->y * h->sym_factor + 256) >> 9);
600 dst->ref = 0;
601 dst->dist = h->dist[0];
602 set_mvs(dst, size);
603 }
604
605 static void mv_pred(AVSContext *h, enum mv_loc_t nP, enum mv_loc_t nC,
606 enum mv_pred_t mode, enum block_t size, int ref) {
607 vector_t *mvP = &h->mv[nP];
608 vector_t *mvA = &h->mv[nP-1];
609 vector_t *mvB = &h->mv[nP-4];
610 vector_t *mvC = &h->mv[nC];
611 int mvAref = mvA->ref;
612 int mvBref = mvB->ref;
613 int mvCref;
614
615 mvP->ref = ref;
616 mvP->dist = h->dist[mvP->ref];
617 if(mvC->ref == NOT_AVAIL)
618 mvC = &h->mv[nP-5]; // set to top-left (mvD)
619 mvCref = mvC->ref;
620 if(mode == MV_PRED_PSKIP) {
621 if((mvAref == NOT_AVAIL) || (mvBref == NOT_AVAIL) ||
622 ((mvA->x | mvA->y | mvA->ref) == 0) ||
623 ((mvB->x | mvB->y | mvB->ref) == 0) ) {
624 mvP->x = mvP->y = 0;
625 set_mvs(mvP,size);
626 return;
627 }
628 }
629 /* if there is only one suitable candidate, take it */
630 if((mvAref >= 0) && (mvBref < 0) && (mvCref < 0)) {
631 mvP->x = mvA->x;
632 mvP->y = mvA->y;
633 } else if((mvAref < 0) && (mvBref >= 0) && (mvCref < 0)) {
634 mvP->x = mvB->x;
635 mvP->y = mvB->y;
636 } else if((mvAref < 0) && (mvBref < 0) && (mvCref >= 0)) {
637 mvP->x = mvC->x;
638 mvP->y = mvC->y;
639 } else {
640 switch(mode) {
641 case MV_PRED_LEFT:
642 if(mvAref == mvP->ref) {
643 mvP->x = mvA->x;
644 mvP->y = mvA->y;
645 } else
646 mv_pred_median(h, mvP, mvA, mvB, mvC);
647 break;
648 case MV_PRED_TOP:
649 if(mvBref == mvP->ref) {
650 mvP->x = mvB->x;
651 mvP->y = mvB->y;
652 } else
653 mv_pred_median(h, mvP, mvA, mvB, mvC);
654 break;
655 case MV_PRED_TOPRIGHT:
656 if(mvCref == mvP->ref) {
657 mvP->x = mvC->x;
658 mvP->y = mvC->y;
659 } else
660 mv_pred_median(h, mvP, mvA, mvB, mvC);
661 break;
662 default:
663 mv_pred_median(h, mvP, mvA, mvB, mvC);
664 break;
665 }
666 }
667 if(mode < MV_PRED_PSKIP) {
668 mvP->x += get_se_golomb(&h->s.gb);
669 mvP->y += get_se_golomb(&h->s.gb);
670 }
671 set_mvs(mvP,size);
672 }
673
674 /*****************************************************************************
675 *
676 * residual data decoding
677 *
678 ****************************************************************************/
679
680 /** k-th order Exp-Golomb code */
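/* the value is decoded as (unsigned Exp-Golomb prefix << k) + k raw suffix bits;
 * e.g. with k=1, a prefix of 2 followed by suffix bit 1 yields (2<<1)+1 = 5 */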
681 static inline int get_ue_code(GetBitContext *gb, int order) {
682 if(order) {
683 int ret = get_ue_golomb(gb) << order;
684 return ret + get_bits(gb,order);
685 }
686 return get_ue_golomb(gb);
687 }
688
689 /**
690 * decode coefficients from one 8x8 block, dequantize, inverse transform
691 * and add them to sample block
692 * @param r pointer to 2D VLC table
693 * @param esc_golomb_order escape codes are k-golomb with this order k
694 * @param qp quantizer
695 * @param dst location of sample block
696 * @param stride line stride in frame buffer
697 */
698 static int decode_residual_block(AVSContext *h, GetBitContext *gb,
699 const residual_vlc_t *r, int esc_golomb_order,
700 int qp, uint8_t *dst, int stride) {
701 int i,pos = -1;
702 int level_code, esc_code, level, run, mask;
703     int level_buf[65]; // 65, not 64: the parsing loop below may store one extra pair on corrupt streams
704     int run_buf[65];
705 int dqm = dequant_mul[qp];
706 int dqs = dequant_shift[qp];
707 int dqa = 1 << (dqs - 1);
708 const uint8_t *scantab = ff_zigzag_direct;
709 DCTELEM block[64];
710
711 memset(block,0,64*sizeof(DCTELEM));
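    /* first pass: gather (level, run) pairs in coding order until the
     * end-of-block code; escape codes carry the level as a separate
     * k-th order Exp-Golomb value */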
712 for(i=0;i<65;i++) {
713 level_code = get_ue_code(gb,r->golomb_order);
714 if(level_code >= ESCAPE_CODE) {
715 run = (level_code - ESCAPE_CODE) >> 1;
716 esc_code = get_ue_code(gb,esc_golomb_order);
717 level = esc_code + (run > r->max_run ? 1 : r->level_add[run]);
718 while(level > r->inc_limit)
719 r++;
720 mask = -(level_code & 1);
721 level = (level^mask) - mask;
722 } else {
723 if(level_code < 0)
724 return -1;
725 level = r->rltab[level_code][0];
726 if(!level) //end of block signal
727 break;
728 run = r->rltab[level_code][1];
729 r += r->rltab[level_code][2];
730 }
731 level_buf[i] = level;
732 run_buf[i] = run;
733 }
734 /* inverse scan and dequantization */
735 while(--i >= 0){
736 pos += 1 + run_buf[i];
737 if(pos > 63) {
738 av_log(h->s.avctx, AV_LOG_ERROR,
739 "position out of block bounds at pic %d MB(%d,%d)\n",
740 h->picture.poc, h->mbx, h->mby);
741 return -1;
742 }
743 block[scantab[pos]] = (level_buf[i]*dqm + dqa) >> dqs;
744 }
745 h->s.dsp.cavs_idct8_add(dst,block,stride);
746 return 0;
747 }
748
749
750 static inline void decode_residual_chroma(AVSContext *h) {
751 if(h->cbp & (1<<4))
752 decode_residual_block(h,&h->s.gb,chroma_2dvlc,0, chroma_qp[h->qp],
753 h->cu,h->c_stride);
754 if(h->cbp & (1<<5))
755 decode_residual_block(h,&h->s.gb,chroma_2dvlc,0, chroma_qp[h->qp],
756 h->cv,h->c_stride);
757 }
758
759 static inline void decode_residual_inter(AVSContext *h) {
760 int block;
761
762 /* get coded block pattern */
763 h->cbp = cbp_tab[get_ue_golomb(&h->s.gb)][1];
764 /* get quantizer */
765 if(h->cbp && !h->qp_fixed)
766 h->qp += get_se_golomb(&h->s.gb);
767 for(block=0;block<4;block++)
768 if(h->cbp & (1<<block))
769 decode_residual_block(h,&h->s.gb,inter_2dvlc,0,h->qp,
770 h->cy + h->luma_scan[block], h->l_stride);
771 decode_residual_chroma(h);
772 }
773
774 /*****************************************************************************
775 *
776 * macroblock level
777 *
778 ****************************************************************************/
779
780 /**
781 * initialise predictors for motion vectors and intra prediction
782 */
783 static inline void init_mb(AVSContext *h) {
784 int i;
785
786 /* copy predictors from top line (MB B and C) into cache */
787 for(i=0;i<3;i++) {
788 h->mv[MV_FWD_B2+i] = h->top_mv[0][h->mbx*2+i];
789 h->mv[MV_BWD_B2+i] = h->top_mv[1][h->mbx*2+i];
790 }
791 h->pred_mode_Y[1] = h->top_pred_Y[h->mbx*2+0];
792 h->pred_mode_Y[2] = h->top_pred_Y[h->mbx*2+1];
793 /* clear top predictors if MB B is not available */
794 if(!(h->flags & B_AVAIL)) {
795 h->mv[MV_FWD_B2] = un_mv;
796 h->mv[MV_FWD_B3] = un_mv;
797 h->mv[MV_BWD_B2] = un_mv;
798 h->mv[MV_BWD_B3] = un_mv;
799 h->pred_mode_Y[1] = h->pred_mode_Y[2] = NOT_AVAIL;
800 h->flags &= ~(C_AVAIL|D_AVAIL);
801 } else if(h->mbx) {
802 h->flags |= D_AVAIL;
803 }
804 if(h->mbx == h->mb_width-1) //MB C not available
805 h->flags &= ~C_AVAIL;
806 /* clear top-right predictors if MB C is not available */
807 if(!(h->flags & C_AVAIL)) {
808 h->mv[MV_FWD_C2] = un_mv;
809 h->mv[MV_BWD_C2] = un_mv;
810 }
811 /* clear top-left predictors if MB D is not available */
812 if(!(h->flags & D_AVAIL)) {
813 h->mv[MV_FWD_D3] = un_mv;
814 h->mv[MV_BWD_D3] = un_mv;
815 }
816 /* set pointer for co-located macroblock type */
817 h->col_type = &h->col_type_base[h->mby*h->mb_width + h->mbx];
818 }
819
820 static inline void check_for_slice(AVSContext *h);
821
822 /**
823  * save predictors for later macroblocks and advance to the
824  * next macroblock address
825 * @returns 0 if end of frame is reached, 1 otherwise
826 */
827 static inline int next_mb(AVSContext *h) {
828 int i;
829
830 h->flags |= A_AVAIL;
831 h->cy += 16;
832 h->cu += 8;
833 h->cv += 8;
834 /* copy mvs as predictors to the left */
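    /* i steps through the rows of the MV cache (stride 4, forward then
     * backward half); each row's column 2 entry (B3, X1, X3) becomes that
     * row's column 0 entry (D3, A1, A3) for the next macroblock to the right */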
835 for(i=0;i<=20;i+=4)
836 h->mv[i] = h->mv[i+2];
837 /* copy bottom mvs from cache to top line */
838 h->top_mv[0][h->mbx*2+0] = h->mv[MV_FWD_X2];
839 h->top_mv[0][h->mbx*2+1] = h->mv[MV_FWD_X3];
840 h->top_mv[1][h->mbx*2+0] = h->mv[MV_BWD_X2];
841 h->top_mv[1][h->mbx*2+1] = h->mv[MV_BWD_X3];
842 /* next MB address */
843 h->mbx++;
844 if(h->mbx == h->mb_width) { //new mb line
845 h->flags = B_AVAIL|C_AVAIL;
846 /* clear left pred_modes */
847 h->pred_mode_Y[3] = h->pred_mode_Y[6] = NOT_AVAIL;
848 /* clear left mv predictors */
849 for(i=0;i<=20;i+=4)
850 h->mv[i] = un_mv;
851 h->mbx = 0;
852 h->mby++;
853 /* re-calculate sample pointers */
854 h->cy = h->picture.data[0] + h->mby*16*h->l_stride;
855 h->cu = h->picture.data[1] + h->mby*8*h->c_stride;
856 h->cv = h->picture.data[2] + h->mby*8*h->c_stride;
857 if(h->mby == h->mb_height) { //frame end
858 return 0;
859 } else {
860 //check_for_slice(h);
861 }
862 }
863 return 1;
864 }
865
866 static void decode_mb_i(AVSContext *h) {
867 GetBitContext *gb = &h->s.gb;
868 int block, pred_mode_uv;
869 uint8_t top[18];
870 uint8_t left[18];
871 uint8_t *d;
872
873 init_mb(h);
874
875 /* get intra prediction modes from stream */
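    /* the mode of each 8x8 block is predicted as min(left, top) (2 if a
     * neighbour is missing); one bit signals "use the prediction", otherwise
     * two bits select one of the remaining four modes */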
876 for(block=0;block<4;block++) {
877 int nA,nB,predpred;
878 int pos = scan3x3[block];
879
880 nA = h->pred_mode_Y[pos-1];
881 nB = h->pred_mode_Y[pos-3];
882 if((nA == NOT_AVAIL) || (nB == NOT_AVAIL))
883 predpred = 2;
884 else
885 predpred = FFMIN(nA,nB);
886 if(get_bits1(gb))
887 h->pred_mode_Y[pos] = predpred;
888 else {
889 h->pred_mode_Y[pos] = get_bits(gb,2);
890 if(h->pred_mode_Y[pos] >= predpred)
891 h->pred_mode_Y[pos]++;
892 }
893 }
894 pred_mode_uv = get_ue_golomb(gb);
895 if(pred_mode_uv > 6) {
896 av_log(h->s.avctx, AV_LOG_ERROR, "illegal intra chroma pred mode\n");
897 pred_mode_uv = 0;
898 }
899
900 /* save pred modes before they get modified */
901 h->pred_mode_Y[3] = h->pred_mode_Y[5];
902 h->pred_mode_Y[6] = h->pred_mode_Y[8];
903 h->top_pred_Y[h->mbx*2+0] = h->pred_mode_Y[7];
904 h->top_pred_Y[h->mbx*2+1] = h->pred_mode_Y[8];
905
906 /* modify pred modes according to availability of neighbour samples */
907 if(!(h->flags & A_AVAIL)) {
908 modify_pred(left_modifier_l, &h->pred_mode_Y[4] );
909 modify_pred(left_modifier_l, &h->pred_mode_Y[7] );
910 modify_pred(left_modifier_c, &pred_mode_uv );
911 }
912 if(!(h->flags & B_AVAIL)) {
913 modify_pred(top_modifier_l, &h->pred_mode_Y[4] );
914 modify_pred(top_modifier_l, &h->pred_mode_Y[5] );
915 modify_pred(top_modifier_c, &pred_mode_uv );
916 }
917
918 /* get coded block pattern */
919 if(h->pic_type == FF_I_TYPE)
920 h->cbp = cbp_tab[get_ue_golomb(gb)][0];
921 if(h->cbp && !h->qp_fixed)
922 h->qp += get_se_golomb(gb); //qp_delta
923
924 /* luma intra prediction interleaved with residual decode/transform/add */
925 for(block=0;block<4;block++) {
926 d = h->cy + h->luma_scan[block];
927 load_intra_pred_luma(h, top, left, block);
928 h->intra_pred_l[h->pred_mode_Y[scan3x3[block]]]
929 (d, top, left, h->l_stride);
930 if(h->cbp & (1<<block))
931 decode_residual_block(h,gb,intra_2dvlc,1,h->qp,d,h->l_stride);
932 }
933
934 /* chroma intra prediction */
935 /* extend borders by one pixel */
936 h->left_border_u[9] = h->left_border_u[8];
937 h->left_border_v[9] = h->left_border_v[8];
938 h->top_border_u[h->mbx*10+9] = h->top_border_u[h->mbx*10+8];
939 h->top_border_v[h->mbx*10+9] = h->top_border_v[h->mbx*10+8];
940 if(h->mbx && h->mby) {
941 h->top_border_u[h->mbx*10] = h->left_border_u[0] = h->topleft_border_u;
942 h->top_border_v[h->mbx*10] = h->left_border_v[0] = h->topleft_border_v;
943 } else {
944 h->left_border_u[0] = h->left_border_u[1];
945 h->left_border_v[0] = h->left_border_v[1];
946 h->top_border_u[h->mbx*10] = h->top_border_u[h->mbx*10+1];
947 h->top_border_v[h->mbx*10] = h->top_border_v[h->mbx*10+1];
948 }
949 h->intra_pred_c[pred_mode_uv](h->cu, &h->top_border_u[h->mbx*10],
950 h->left_border_u, h->c_stride);
951 h->intra_pred_c[pred_mode_uv](h->cv, &h->top_border_v[h->mbx*10],
952 h->left_border_v, h->c_stride);
953
954 decode_residual_chroma(h);
955 filter_mb(h,I_8X8);
956
957 /* mark motion vectors as intra */
958 h->mv[MV_FWD_X0] = intra_mv;
959 set_mvs(&h->mv[MV_FWD_X0], BLK_16X16);
960 h->mv[MV_BWD_X0] = intra_mv;
961 set_mvs(&h->mv[MV_BWD_X0], BLK_16X16);
962 if(h->pic_type != FF_B_TYPE)
963 *h->col_type = I_8X8;
964 }
965
966 static void decode_mb_p(AVSContext *h, enum mb_t mb_type) {
967 GetBitContext *gb = &h->s.gb;
968 int ref[4];
969
970 init_mb(h);
971 switch(mb_type) {
972 case P_SKIP:
973 mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_PSKIP, BLK_16X16, 0);
974 break;
975 case P_16X16:
976 ref[0] = h->ref_flag ? 0 : get_bits1(gb);
977 mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_MEDIAN, BLK_16X16,ref[0]);
978 break;
979 case P_16X8:
980 ref[0] = h->ref_flag ? 0 : get_bits1(gb);
981 ref[2] = h->ref_flag ? 0 : get_bits1(gb);
982 mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_TOP, BLK_16X8, ref[0]);
983 mv_pred(h, MV_FWD_X2, MV_FWD_A1, MV_PRED_LEFT, BLK_16X8, ref[2]);
984 break;
985 case P_8X16:
986 ref[0] = h->ref_flag ? 0 : get_bits1(gb);
987 ref[1] = h->ref_flag ? 0 : get_bits1(gb);
988 mv_pred(h, MV_FWD_X0, MV_FWD_B3, MV_PRED_LEFT, BLK_8X16, ref[0]);
989 mv_pred(h, MV_FWD_X1, MV_FWD_C2, MV_PRED_TOPRIGHT, BLK_8X16, ref[1]);
990 break;
991 case P_8X8:
992 ref[0] = h->ref_flag ? 0 : get_bits1(gb);
993 ref[1] = h->ref_flag ? 0 : get_bits1(gb);
994 ref[2] = h->ref_flag ? 0 : get_bits1(gb);
995 ref[3] = h->ref_flag ? 0 : get_bits1(gb);
996 mv_pred(h, MV_FWD_X0, MV_FWD_B3, MV_PRED_MEDIAN, BLK_8X8, ref[0]);
997 mv_pred(h, MV_FWD_X1, MV_FWD_C2, MV_PRED_MEDIAN, BLK_8X8, ref[1]);
998 mv_pred(h, MV_FWD_X2, MV_FWD_X1, MV_PRED_MEDIAN, BLK_8X8, ref[2]);
999 mv_pred(h, MV_FWD_X3, MV_FWD_X0, MV_PRED_MEDIAN, BLK_8X8, ref[3]);
1000 }
1001 inter_pred(h);
1002 store_mvs(h);
1003 if(mb_type != P_SKIP)
1004 decode_residual_inter(h);
1005 filter_mb(h,mb_type);
1006 *h->col_type = mb_type;
1007 }
1008
1009 static void decode_mb_b(AVSContext *h, enum mb_t mb_type) {
1010 int block;
1011 enum sub_mb_t sub_type[4];
1012 int flags;
1013
1014 init_mb(h);
1015
1016 /* reset all MVs */
1017 h->mv[MV_FWD_X0] = dir_mv;
1018 set_mvs(&h->mv[MV_FWD_X0], BLK_16X16);
1019 h->mv[MV_BWD_X0] = dir_mv;
1020 set_mvs(&h->mv[MV_BWD_X0], BLK_16X16);
1021 switch(mb_type) {
1022 case B_SKIP:
1023 case B_DIRECT:
1024 if(!(*h->col_type)) {
1025 /* intra MB at co-location, do in-plane prediction */
1026 mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_BSKIP, BLK_16X16, 1);
1027 mv_pred(h, MV_BWD_X0, MV_BWD_C2, MV_PRED_BSKIP, BLK_16X16, 0);
1028 } else
1029 /* direct prediction from co-located P MB, block-wise */
1030 for(block=0;block<4;block++)
1031 mv_pred_direct(h,&h->mv[mv_scan[block]],
1032 &h->col_mv[(h->mby*h->mb_width+h->mbx)*4 + block]);
1033 break;
1034 case B_FWD_16X16:
1035 mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_MEDIAN, BLK_16X16, 1);
1036 break;
1037 case B_SYM_16X16:
1038 mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_MEDIAN, BLK_16X16, 1);
1039 mv_pred_sym(h, &h->mv[MV_FWD_X0], BLK_16X16);
1040 break;
1041 case B_BWD_16X16:
1042 mv_pred(h, MV_BWD_X0, MV_BWD_C2, MV_PRED_MEDIAN, BLK_16X16, 0);
1043 break;
1044 case B_8X8:
1045 for(block=0;block<4;block++)
1046 sub_type[block] = get_bits(&h->s.gb,2);
1047 for(block=0;block<4;block++) {
1048 switch(sub_type[block]) {
1049 case B_SUB_DIRECT:
1050 if(!(*h->col_type)) {
1051 /* intra MB at co-location, do in-plane prediction */
1052 mv_pred(h, mv_scan[block], mv_scan[block]-3,
1053 MV_PRED_BSKIP, BLK_8X8, 1);
1054 mv_pred(h, mv_scan[block]+MV_BWD_OFFS,
1055 mv_scan[block]-3+MV_BWD_OFFS,
1056 MV_PRED_BSKIP, BLK_8X8, 0);
1057 } else
1058 mv_pred_direct(h,&h->mv[mv_scan[block]],
1059 &h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + block]);
1060 break;
1061 case B_SUB_FWD:
1062 mv_pred(h, mv_scan[block], mv_scan[block]-3,
1063 MV_PRED_MEDIAN, BLK_8X8, 1);
1064 break;
1065 case B_SUB_SYM:
1066 mv_pred(h, mv_scan[block], mv_scan[block]-3,
1067 MV_PRED_MEDIAN, BLK_8X8, 1);
1068 mv_pred_sym(h, &h->mv[mv_scan[block]], BLK_8X8);
1069 break;
1070 }
1071 }
1072 for(block=0;block<4;block++) {
1073 if(sub_type[block] == B_SUB_BWD)
1074 mv_pred(h, mv_scan[block]+MV_BWD_OFFS,
1075 mv_scan[block]+MV_BWD_OFFS-3,
1076 MV_PRED_MEDIAN, BLK_8X8, 0);
1077 }
1078 break;
1079 default:
1080 assert((mb_type > B_SYM_16X16) && (mb_type < B_8X8));
1081 flags = b_partition_flags[(mb_type-1)>>1];
1082 if(mb_type & 1) { /* 16x8 macroblock types */
1083 if(flags & FWD0)
1084 mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_TOP, BLK_16X8, 1);
1085 if(flags & SYM0) {
1086 mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_TOP, BLK_16X8, 1);
1087 mv_pred_sym(h, &h->mv[MV_FWD_X0], BLK_16X8);
1088 }
1089 if(flags & FWD1)
1090 mv_pred(h, MV_FWD_X2, MV_FWD_A1, MV_PRED_LEFT, BLK_16X8, 1);
1091 if(flags & SYM1) {
1092 mv_pred(h, MV_FWD_X2, MV_FWD_A1, MV_PRED_LEFT, BLK_16X8, 1);
1093 mv_pred_sym(h, &h->mv[9], BLK_16X8);
1094 }
1095 if(flags & BWD0)
1096 mv_pred(h, MV_BWD_X0, MV_BWD_C2, MV_PRED_TOP, BLK_16X8, 0);
1097 if(flags & BWD1)
1098 mv_pred(h, MV_BWD_X2, MV_BWD_A1, MV_PRED_LEFT, BLK_16X8, 0);
1099 } else { /* 8x16 macroblock types */
1100 if(flags & FWD0)
1101 mv_pred(h, MV_FWD_X0, MV_FWD_B3, MV_PRED_LEFT, BLK_8X16, 1);
1102 if(flags & SYM0) {
1103 mv_pred(h, MV_FWD_X0, MV_FWD_B3, MV_PRED_LEFT, BLK_8X16, 1);
1104 mv_pred_sym(h, &h->mv[MV_FWD_X0], BLK_8X16);
1105 }
1106 if(flags & FWD1)
1107 mv_pred(h, MV_FWD_X1, MV_FWD_C2, MV_PRED_TOPRIGHT,BLK_8X16, 1);
1108 if(flags & SYM1) {
1109 mv_pred(h, MV_FWD_X1, MV_FWD_C2, MV_PRED_TOPRIGHT,BLK_8X16, 1);
1110 mv_pred_sym(h, &h->mv[6], BLK_8X16);
1111 }
1112 if(flags & BWD0)
1113 mv_pred(h, MV_BWD_X0, MV_BWD_B3, MV_PRED_LEFT, BLK_8X16, 0);
1114 if(flags & BWD1)
1115 mv_pred(h, MV_BWD_X1, MV_BWD_C2, MV_PRED_TOPRIGHT,BLK_8X16, 0);
1116 }
1117 }
1118 inter_pred(h);
1119 if(mb_type != B_SKIP)
1120 decode_residual_inter(h);
1121 filter_mb(h,mb_type);
1122 }
1123
1124 /*****************************************************************************
1125 *
1126 * slice level
1127 *
1128 ****************************************************************************/
1129
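/* a slice starts with a 0x000001 prefix followed by a start code byte of at
 * most 0xAF; that byte doubles as the vertical position (first macroblock
 * row) of the slice */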
1130 static inline int decode_slice_header(AVSContext *h, GetBitContext *gb) {
1131 if(h->stc > 0xAF)
1132 av_log(h->s.avctx, AV_LOG_ERROR, "unexpected start code 0x%02x\n", h->stc);
1133 h->mby = h->stc;
1134 if((h->mby == 0) && (!h->qp_fixed)){
1135 h->qp_fixed = get_bits1(gb);
1136 h->qp = get_bits(gb,6);
1137 }
1138     /* inter pictures, or slices in the second field of an I picture, can have weighting params */
1139 if((h->pic_type != FF_I_TYPE) || (!h->pic_structure && h->mby >= h->mb_width/2))
1140 if(get_bits1(gb)) { //slice_weighting_flag
1141 av_log(h->s.avctx, AV_LOG_ERROR,
1142 "weighted prediction not yet supported\n");
1143 }
1144 return 0;
1145 }
1146
1147 static inline void check_for_slice(AVSContext *h) {
1148 GetBitContext *gb = &h->s.gb;
1149 int align;
1150 align = (-get_bits_count(gb)) & 7;
1151 if((show_bits_long(gb,24+align) & 0xFFFFFF) == 0x000001) {
1152 get_bits_long(gb,24+align);
1153 h->stc = get_bits(gb,8);
1154 decode_slice_header(h,gb);
1155 }
1156 }
1157
1158 /*****************************************************************************
1159 *
1160 * frame level
1161 *
1162 ****************************************************************************/
1163
1164 static void init_pic(AVSContext *h) {
1165 int i;
1166
1167 /* clear some predictors */
1168 for(i=0;i<=20;i+=4)
1169 h->mv[i] = un_mv;
1170 h->mv[MV_BWD_X0] = dir_mv;
1171 set_mvs(&h->mv[MV_BWD_X0], BLK_16X16);
1172 h->mv[MV_FWD_X0] = dir_mv;
1173 set_mvs(&h->mv[MV_FWD_X0], BLK_16X16);
1174 h->pred_mode_Y[3] = h->pred_mode_Y[6] = NOT_AVAIL;
1175 h->cy = h->picture.data[0];
1176 h->cu = h->picture.data[1];
1177 h->cv = h->picture.data[2];
1178 h->l_stride = h->picture.linesize[0];
1179 h->c_stride = h->picture.linesize[1];
1180 h->luma_scan[2] = 8*h->l_stride;
1181 h->luma_scan[3] = 8*h->l_stride+8;
1182 h->mbx = h->mby = 0;
1183 h->flags = 0;
1184 }
1185
1186 static int decode_pic(AVSContext *h) {
1187 MpegEncContext *s = &h->s;
1188 int skip_count;
1189 enum mb_t mb_type;
1190
1191 if (!s->context_initialized) {
1192 if (MPV_common_init(s) < 0)
1193 return -1;
1194 }
1195 get_bits(&s->gb,16);//bbv_dwlay
1196 if(h->stc == PIC_PB_START_CODE) {
1197 h->pic_type = get_bits(&s->gb,2) + FF_I_TYPE;
1198 /* make sure we have the reference frames we need */
1199 if(!h->DPB[0].data[0] ||
1200 (!h->DPB[1].data[0] && h->pic_type == FF_B_TYPE))
1201 return -1;
1202 } else {
1203 h->pic_type = FF_I_TYPE;
1204 if(get_bits1(&s->gb))
1205 get_bits(&s->gb,16);//time_code
1206 }
1207 /* release last B frame */
1208 if(h->picture.data[0])
1209 s->avctx->release_buffer(s->avctx, (AVFrame *)&h->picture);
1210
1211 s->avctx->get_buffer(s->avctx, (AVFrame *)&h->picture);
1212 init_pic(h);
1213 h->picture.poc = get_bits(&s->gb,8)*2;
1214
1215 /* get temporal distances and MV scaling factors */
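    /* scale_den[] = 512/dist (applied with >>9), direct_den[] = 16384/dist
     * (applied with >>14) and sym_factor = dist[0]*scale_den[1] are fixed-point
     * reciprocals of the temporal distances used by the MV prediction code */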
1216 if(h->pic_type != FF_B_TYPE) {
1217 h->dist[0] = (h->picture.poc - h->DPB[0].poc + 512) % 512;
1218 } else {
1219 h->dist[0] = (h->DPB[0].poc - h->picture.poc + 512) % 512;
1220 }
1221 h->dist[1] = (h->picture.poc - h->DPB[1].poc + 512) % 512;
1222 h->scale_den[0] = h->dist[0] ? 512/h->dist[0] : 0;
1223 h->scale_den[1] = h->dist[1] ? 512/h->dist[1] : 0;
1224 if(h->pic_type == FF_B_TYPE) {
1225 h->sym_factor = h->dist[0]*h->scale_den[1];
1226 } else {
1227 h->direct_den[0] = h->dist[0] ? 16384/h->dist[0] : 0;
1228 h->direct_den[1] = h->dist[1] ? 16384/h->dist[1] : 0;
1229 }
1230
1231 if(s->low_delay)
1232 get_ue_golomb(&s->gb); //bbv_check_times
1233 h->progressive = get_bits1(&s->gb);
1234 if(h->progressive)
1235 h->pic_structure = 1;
1236 else if(!(h->pic_structure = get_bits1(&s->gb) && (h->stc == PIC_PB_START_CODE)) )
1237 get_bits1(&s->gb); //advanced_pred_mode_disable
1238 skip_bits1(&s->gb); //top_field_first
1239 skip_bits1(&s->gb); //repeat_first_field
1240 h->qp_fixed = get_bits1(&s->gb);
1241 h->qp = get_bits(&s->gb,6);
1242 if(h->pic_type == FF_I_TYPE) {
1243 if(!h->progressive && !h->pic_structure)
1244 skip_bits1(&s->gb);//what is this?
1245 skip_bits(&s->gb,4); //reserved bits
1246 } else {
1247 if(!(h->pic_type == FF_B_TYPE && h->pic_structure == 1))
1248 h->ref_flag = get_bits1(&s->gb);
1249 skip_bits(&s->gb,4); //reserved bits
1250 h->skip_mode_flag = get_bits1(&s->gb);
1251 }
1252 h->loop_filter_disable = get_bits1(&s->gb);
1253 if(!h->loop_filter_disable && get_bits1(&s->gb)) {
1254 h->alpha_offset = get_se_golomb(&s->gb);
1255 h->beta_offset = get_se_golomb(&s->gb);
1256 } else {
1257 h->alpha_offset = h->beta_offset = 0;
1258 }
1259 check_for_slice(h);
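    /* macroblock layer: I pictures code every MB as intra; for P and B
     * pictures, if skip_mode_flag is set a run count of skipped MBs precedes
     * each coded mb_type, otherwise skipped MBs are signalled through
     * mb_type itself (P_SKIP / B_SKIP) */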
1260 if(h->pic_type == FF_I_TYPE) {
1261 do {
1262 decode_mb_i(h);
1263 } while(next_mb(h));
1264 } else if(h->pic_type == FF_P_TYPE) {
1265 do {
1266 if(h->skip_mode_flag) {
1267 skip_count = get_ue_golomb(&s->gb);
1268 while(skip_count--) {
1269 decode_mb_p(h,P_SKIP);
1270 if(!next_mb(h))
1271 goto done;
1272 }
1273 mb_type = get_ue_golomb(&s->gb) + P_16X16;
1274 } else
1275 mb_type = get_ue_golomb(&s->gb) + P_SKIP;
1276 if(mb_type > P_8X8) {
1277 h->cbp = cbp_tab[mb_type - P_8X8 - 1][0];
1278 decode_mb_i(h);
1279 } else
1280 decode_mb_p(h,mb_type);
1281 } while(next_mb(h));
1282 } else { /* FF_B_TYPE */
1283 do {
1284 if(h->skip_mode_flag) {
1285 skip_count = get_ue_golomb(&s->gb);
1286 while(skip_count--) {
1287 decode_mb_b(h,B_SKIP);
1288 if(!next_mb(h))
1289 goto done;
1290 }
1291 mb_type = get_ue_golomb(&s->gb) + B_DIRECT;
1292 } else
1293 mb_type = get_ue_golomb(&s->gb) + B_SKIP;
1294 init_mb(h);
1295 if(mb_type > B_8X8) {
1296 h->cbp = cbp_tab[mb_type - B_8X8 - 1][0];
1297 decode_mb_i(h);
1298 } else
1299 decode_mb_b(h,mb_type);
1300 } while(next_mb(h));
1301 }
1302 done:
1303 if(h->pic_type != FF_B_TYPE) {
1304 if(h->DPB[1].data[0])
1305 s->avctx->release_buffer(s->avctx, (AVFrame *)&h->DPB[1]);
1306 memcpy(&h->DPB[1], &h->DPB[0], sizeof(Picture));
1307 memcpy(&h->DPB[0], &h->picture, sizeof(Picture));
1308 memset(&h->picture,0,sizeof(Picture));
1309 }
1310 return 0;
1311 }
1312
1313 /*****************************************************************************
1314 *
1315 * headers and interface
1316 *
1317 ****************************************************************************/
1318
1319 /**
1320  * Some predictions require data from the top-neighbouring macroblock.
1321  * This data has to be kept for one complete row of macroblocks,
1322  * and the storage for it is allocated here.
1323 */
1324 static void init_top_lines(AVSContext *h) {
1325 /* alloc top line of predictors */
1326 h->top_qp = av_malloc( h->mb_width);
1327 h->top_mv[0] = av_malloc((h->mb_width*2+1)*sizeof(vector_t));
1328 h->top_mv[1] = av_malloc((h->mb_width*2+1)*sizeof(vector_t));
1329 h->top_pred_Y = av_malloc( h->mb_width*2*sizeof(*h->top_pred_Y));
1330 h->top_border_y = av_malloc((h->mb_width+1)*16);
1331 h->top_border_u = av_malloc((h->mb_width)*10);
1332 h->top_border_v = av_malloc((h->mb_width)*10);
1333
1334 /* alloc space for co-located MVs and types */
1335 h->col_mv = av_malloc( h->mb_width*h->mb_height*4*sizeof(vector_t));
1336 h->col_type_base = av_malloc(h->mb_width*h->mb_height);
1337 }
1338
1339 static int decode_seq_header(AVSContext *h) {
1340 MpegEncContext *s = &h->s;
1341 extern const AVRational ff_frame_rate_tab[];
1342 int frame_rate_code;
1343
1344 h->profile = get_bits(&s->gb,8);
1345 h->level = get_bits(&s->gb,8);
1346 skip_bits1(&s->gb); //progressive sequence
1347 s->width = get_bits(&s->gb,14);
1348 s->height = get_bits(&s->gb,14);
1349 skip_bits(&s->gb,2); //chroma format
1350 skip_bits(&s->gb,3); //sample_precision
1351 h->aspect_ratio = get_bits(&s->gb,4);
1352 frame_rate_code = get_bits(&s->gb,4);
1353 skip_bits(&s->gb,18);//bit_rate_lower
1354 skip_bits1(&s->gb); //marker_bit
1355 skip_bits(&s->gb,12);//bit_rate_upper
1356 s->low_delay = get_bits1(&s->gb);
1357 h->mb_width = (s->width + 15) >> 4;
1358 h->mb_height = (s->height + 15) >> 4;
1359 h->s.avctx->time_base.den = ff_frame_rate_tab[frame_rate_code].num;
1360 h->s.avctx->time_base.num = ff_frame_rate_tab[frame_rate_code].den;
1361 h->s.avctx->width = s->width;
1362 h->s.avctx->height = s->height;
1363 if(!h->top_qp)
1364 init_top_lines(h);
1365 return 0;
1366 }
1367
1368 /**
1369 * finds the end of the current frame in the bitstream.
1370  * @return the position of the first byte of the next frame, or END_NOT_FOUND
1371 */
1372 int ff_cavs_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size) {
1373 int pic_found, i;
1374 uint32_t state;
1375
1376 pic_found= pc->frame_start_found;
1377 state= pc->state;
1378
1379 i=0;
1380 if(!pic_found){
1381 for(i=0; i<buf_size; i++){
1382 state= (state<<8) | buf[i];
1383 if(state == PIC_I_START_CODE || state == PIC_PB_START_CODE){
1384 i++;
1385 pic_found=1;
1386 break;
1387 }
1388 }
1389 }
1390
1391 if(pic_found){
1392 /* EOF considered as end of frame */
1393 if (buf_size == 0)
1394 return 0;
1395 for(; i<buf_size; i++){
1396 state= (state<<8) | buf[i];
1397 if((state&0xFFFFFF00) == 0x100){
1398 if(state < SLICE_MIN_START_CODE || state > SLICE_MAX_START_CODE){
1399 pc->frame_start_found=0;
1400 pc->state=-1;
1401 return i-3;
1402 }
1403 }
1404 }
1405 }
1406 pc->frame_start_found= pic_found;
1407 pc->state= state;
1408 return END_NOT_FOUND;
1409 }
1410
1411 void ff_cavs_flush(AVCodecContext * avctx) {
1412 AVSContext *h = avctx->priv_data;
1413 h->got_keyframe = 0;
1414 }
1415
1416 static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size,
1417 uint8_t * buf, int buf_size) {
1418 AVSContext *h = avctx->priv_data;
1419 MpegEncContext *s = &h->s;
1420 int input_size;
1421 const uint8_t *buf_end;
1422 const uint8_t *buf_ptr;
1423 AVFrame *picture = data;
1424 uint32_t stc;
1425
1426 s->avctx = avctx;
1427
1428 if (buf_size == 0) {
1429 if(!s->low_delay && h->DPB[0].data[0]) {
1430 *data_size = sizeof(AVPicture);
1431 *picture = *(AVFrame *) &h->DPB[0];
1432 }
1433 return 0;
1434 }
1435
1436 buf_ptr = buf;
1437 buf_end = buf + buf_size;
1438 for(;;) {
1439 buf_ptr = ff_find_start_code(buf_ptr,buf_end, &stc);
1440 if(stc & 0xFFFFFE00)
1441 return FFMAX(0, buf_ptr - buf - s->parse_context.last_index);
1442 input_size = (buf_end - buf_ptr)*8;
1443 switch(stc) {
1444 case SEQ_START_CODE:
1445 init_get_bits(&s->gb, buf_ptr, input_size);
1446 decode_seq_header(h);
1447 break;
1448 case PIC_I_START_CODE:
1449 if(!h->got_keyframe) {
1450 if(h->DPB[0].data[0])
1451 avctx->release_buffer(avctx, (AVFrame *)&h->DPB[0]);
1452 if(h->DPB[1].data[0])
1453 avctx->release_buffer(avctx, (AVFrame *)&h->DPB[1]);
1454 h->got_keyframe = 1;
1455 }
1456 case PIC_PB_START_CODE:
1457 *data_size = 0;
1458 if(!h->got_keyframe)
1459 break;
1460 init_get_bits(&s->gb, buf_ptr, input_size);
1461 h->stc = stc;
1462 if(decode_pic(h))
1463 break;
1464 *data_size = sizeof(AVPicture);
1465 if(h->pic_type != FF_B_TYPE) {
1466 if(h->DPB[1].data[0]) {
1467 *picture = *(AVFrame *) &h->DPB[1];
1468 } else {
1469 *data_size = 0;
1470 }
1471 } else
1472 *picture = *(AVFrame *) &h->picture;
1473 break;
1474 case EXT_START_CODE:
1475 //mpeg_decode_extension(avctx,buf_ptr, input_size);
1476 break;
1477 case USER_START_CODE:
1478 //mpeg_decode_user_data(avctx,buf_ptr, input_size);
1479 break;
1480 default:
1481 if (stc >= SLICE_MIN_START_CODE &&
1482 stc <= SLICE_MAX_START_CODE) {
1483 init_get_bits(&s->gb, buf_ptr, input_size);
1484 decode_slice_header(h, &s->gb);
1485 }
1486 break;
1487 }
1488 }
1489 }
1490
1491 static int cavs_decode_init(AVCodecContext * avctx) {
1492 AVSContext *h = avctx->priv_data;
1493 MpegEncContext * const s = &h->s;
1494
1495 MPV_decode_defaults(s);
1496 s->avctx = avctx;
1497
1498 avctx->pix_fmt= PIX_FMT_YUV420P;
1499
1500 h->luma_scan[0] = 0;
1501 h->luma_scan[1] = 8;
1502 h->intra_pred_l[ INTRA_L_VERT] = intra_pred_vert;
1503 h->intra_pred_l[ INTRA_L_HORIZ] = intra_pred_horiz;
1504 h->intra_pred_l[ INTRA_L_LP] = intra_pred_lp;
1505 h->intra_pred_l[ INTRA_L_DOWN_LEFT] = intra_pred_down_left;
1506 h->intra_pred_l[INTRA_L_DOWN_RIGHT] = intra_pred_down_right;
1507 h->intra_pred_l[ INTRA_L_LP_LEFT] = intra_pred_lp_left;
1508 h->intra_pred_l[ INTRA_L_LP_TOP] = intra_pred_lp_top;
1509 h->intra_pred_l[ INTRA_L_DC_128] = intra_pred_dc_128;
1510 h->intra_pred_c[ INTRA_C_LP] = intra_pred_lp;
1511 h->intra_pred_c[ INTRA_C_HORIZ] = intra_pred_horiz;
1512 h->intra_pred_c[ INTRA_C_VERT] = intra_pred_vert;
1513 h->intra_pred_c[ INTRA_C_PLANE] = intra_pred_plane;
1514 h->intra_pred_c[ INTRA_C_LP_LEFT] = intra_pred_lp_left;
1515 h->intra_pred_c[ INTRA_C_LP_TOP] = intra_pred_lp_top;
1516 h->intra_pred_c[ INTRA_C_DC_128] = intra_pred_dc_128;
1517 h->mv[ 7] = un_mv;
1518 h->mv[19] = un_mv;
1519 return 0;
1520 }
1521
1522 static int cavs_decode_end(AVCodecContext * avctx) {
1523 AVSContext *h = avctx->priv_data;
1524
1525 av_free(h->top_qp);
1526 av_free(h->top_mv[0]);
1527 av_free(h->top_mv[1]);
1528 av_free(h->top_pred_Y);
1529 av_free(h->top_border_y);
1530 av_free(h->top_border_u);
1531 av_free(h->top_border_v);
1532 av_free(h->col_mv);
1533 av_free(h->col_type_base);
1534 return 0;
1535 }
1536
1537 AVCodec cavs_decoder = {
1538 "cavs",
1539 CODEC_TYPE_VIDEO,
1540 CODEC_ID_CAVS,
1541 sizeof(AVSContext),
1542 cavs_decode_init,
1543 NULL,
1544 cavs_decode_end,
1545 cavs_decode_frame,
1546 CODEC_CAP_DR1 | CODEC_CAP_DELAY,
1547 .flush= ff_cavs_flush,
1548 };