1 /*
2 * Chinese AVS video (AVS1-P2, JiZhun profile) decoder.
3 * Copyright (c) 2006 Stefan Gehrer <stefan.gehrer@gmx.de>
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20 /**
21 * @file cavs.c
22 * Chinese AVS video (AVS1-P2, JiZhun profile) decoder
23 * @author Stefan Gehrer <stefan.gehrer@gmx.de>
24 */
25
26 #include "avcodec.h"
27 #include "bitstream.h"
28 #include "golomb.h"
29 #include "mpegvideo.h"
30 #include "cavsdata.h"
31
32 typedef struct {
33 MpegEncContext s;
34 Picture picture; ///< currently decoded frame
35 Picture DPB[2]; ///< reference frames
36 int dist[2]; ///< temporal distances from current frame to ref frames
37 int profile, level;
38 int aspect_ratio;
39 int mb_width, mb_height;
40 int pic_type;
41 int progressive;
42 int pic_structure;
43     int skip_mode_flag; ///< select between skip_count and one skip_flag per MB
44 int loop_filter_disable;
45 int alpha_offset, beta_offset;
46 int ref_flag;
47 int mbx, mby; ///< macroblock coordinates
48 int flags; ///< availability flags of neighbouring macroblocks
49 int stc; ///< last start code
50 uint8_t *cy, *cu, *cv; ///< current MB sample pointers
51 int left_qp;
52 uint8_t *top_qp;
53
54 /** mv motion vector cache
55 0: D3 B2 B3 C2
56 4: A1 X0 X1 -
57 8: A3 X2 X3 -
58
59 X are the vectors in the current macroblock (5,6,9,10)
60 A is the macroblock to the left (4,8)
61 B is the macroblock to the top (1,2)
62 C is the macroblock to the top-right (3)
63 D is the macroblock to the top-left (0)
64
65 the same is repeated for backward motion vectors */
66 vector_t mv[2*4*3];
67 vector_t *top_mv[2];
68 vector_t *col_mv;
69
70 /** luma pred mode cache
71 0: -- B2 B3
72 3: A1 X0 X1
73 6: A3 X2 X3 */
74 int pred_mode_Y[3*3];
75 int *top_pred_Y;
76 int l_stride, c_stride;
77 int luma_scan[4];
78 int qp;
79 int qp_fixed;
80 int cbp;
81
82     /** intra prediction is done with un-deblocked samples;
83      they are saved here before deblocking the MB */
84 uint8_t *top_border_y, *top_border_u, *top_border_v;
85 uint8_t left_border_y[16], left_border_u[10], left_border_v[10];
86 uint8_t topleft_border_y, topleft_border_u, topleft_border_v;
87
88 void (*intra_pred_l[8])(uint8_t *d,uint8_t *top,uint8_t *left,int stride);
89 void (*intra_pred_c[7])(uint8_t *d,uint8_t *top,uint8_t *left,int stride);
90 uint8_t *col_type_base;
91 uint8_t *col_type;
92
93 /* scaling factors for MV prediction */
94 int sym_factor; ///< for scaling in symmetrical B block
95 int direct_den[2]; ///< for scaling in direct B block
96 int scale_den[2]; ///< for scaling neighbouring MVs
97
98 int got_keyframe;
99 } AVSContext;
100
101 /*****************************************************************************
102 *
103 * in-loop deblocking filter
104 *
105 ****************************************************************************/
106
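/* Boundary-strength helpers for the deblocking filter: 2 selects the strong
 * (intra) filter, 1 the normal filter, 0 leaves the edge untouched.  An
 * inter edge is filtered when the two blocks use different references or
 * their motion vectors differ by at least one luma sample (4 quarter-pel
 * units) in either component. */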
107 static inline int get_bs_p(vector_t *mvP, vector_t *mvQ) {
108 if((mvP->ref == REF_INTRA) || (mvQ->ref == REF_INTRA))
109 return 2;
110 if(mvP->ref != mvQ->ref)
111 return 1;
112 if( (abs(mvP->x - mvQ->x) >= 4) || (abs(mvP->y - mvQ->y) >= 4) )
113 return 1;
114 return 0;
115 }
116
117 static inline int get_bs_b(vector_t *mvP, vector_t *mvQ) {
118 if((mvP->ref == REF_INTRA) || (mvQ->ref == REF_INTRA)) {
119 return 2;
120 } else {
121 vector_t *mvPbw = mvP + MV_BWD_OFFS;
122 vector_t *mvQbw = mvQ + MV_BWD_OFFS;
123 if( (abs( mvP->x - mvQ->x) >= 4) ||
124 (abs( mvP->y - mvQ->y) >= 4) ||
125 (abs(mvPbw->x - mvQbw->x) >= 4) ||
126 (abs(mvPbw->y - mvQbw->y) >= 4) )
127 return 1;
128 }
129 return 0;
130 }
131
132 #define SET_PARAMS \
133 alpha = alpha_tab[clip(qp_avg + h->alpha_offset,0,63)]; \
134 beta = beta_tab[clip(qp_avg + h->beta_offset, 0,63)]; \
135 tc = tc_tab[clip(qp_avg + h->alpha_offset,0,63)];
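/* SET_PARAMS derives the filter thresholds from the averaged QP of the two
 * blocks meeting at the edge: alpha and beta gate the edge-activity tests
 * and tc bounds the per-sample correction; the user offsets are clipped
 * into the 0..63 range of the lookup tables. */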
136
137 /**
138 * in-loop deblocking filter for a single macroblock
139 *
140 * boundary strength (bs) mapping:
141 *
142 * --4---5--
143 * 0 2 |
144 * | 6 | 7 |
145 * 1 3 |
146 * ---------
147 *
148 */
149 static void filter_mb(AVSContext *h, enum mb_t mb_type) {
150 DECLARE_ALIGNED_8(uint8_t, bs[8]);
151 int qp_avg, alpha, beta, tc;
152 int i;
153
154 /* save un-deblocked lines */
155 h->topleft_border_y = h->top_border_y[h->mbx*16+15];
156 h->topleft_border_u = h->top_border_u[h->mbx*10+8];
157 h->topleft_border_v = h->top_border_v[h->mbx*10+8];
158 memcpy(&h->top_border_y[h->mbx*16], h->cy + 15* h->l_stride,16);
159 memcpy(&h->top_border_u[h->mbx*10+1], h->cu + 7* h->c_stride,8);
160 memcpy(&h->top_border_v[h->mbx*10+1], h->cv + 7* h->c_stride,8);
161 for(i=0;i<8;i++) {
162 h->left_border_y[i*2+0] = *(h->cy + 15 + (i*2+0)*h->l_stride);
163 h->left_border_y[i*2+1] = *(h->cy + 15 + (i*2+1)*h->l_stride);
164 h->left_border_u[i+1] = *(h->cu + 7 + i*h->c_stride);
165 h->left_border_v[i+1] = *(h->cv + 7 + i*h->c_stride);
166 }
167 if(!h->loop_filter_disable) {
168 /* clear bs */
169 *((uint64_t *)bs) = 0;
170 /* determine bs */
171 switch(mb_type) {
172 case I_8X8:
173 *((uint64_t *)bs) = 0x0202020202020202ULL;
174 break;
175 case P_8X8:
176 case P_8X16:
177 bs[2] = get_bs_p(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X1]);
178 bs[3] = get_bs_p(&h->mv[MV_FWD_X2], &h->mv[MV_FWD_X3]);
179 case P_16X8:
180 bs[6] = get_bs_p(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X2]);
181 bs[7] = get_bs_p(&h->mv[MV_FWD_X1], &h->mv[MV_FWD_X3]);
182 case P_16X16:
183 case P_SKIP:
184 bs[0] = get_bs_p(&h->mv[MV_FWD_A1], &h->mv[MV_FWD_X0]);
185 bs[1] = get_bs_p(&h->mv[MV_FWD_A3], &h->mv[MV_FWD_X2]);
186 bs[4] = get_bs_p(&h->mv[MV_FWD_B2], &h->mv[MV_FWD_X0]);
187 bs[5] = get_bs_p(&h->mv[MV_FWD_B3], &h->mv[MV_FWD_X1]);
188 break;
189 case B_SKIP:
190 case B_DIRECT:
191 case B_8X8:
192 bs[2] = get_bs_b(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X1]);
193 bs[3] = get_bs_b(&h->mv[MV_FWD_X2], &h->mv[MV_FWD_X3]);
194 bs[6] = get_bs_b(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X2]);
195 bs[7] = get_bs_b(&h->mv[MV_FWD_X1], &h->mv[MV_FWD_X3]);
196 case B_FWD_16X16:
197 case B_BWD_16X16:
198 case B_SYM_16X16:
199 bs[0] = get_bs_b(&h->mv[MV_FWD_A1], &h->mv[MV_FWD_X0]);
200 bs[1] = get_bs_b(&h->mv[MV_FWD_A3], &h->mv[MV_FWD_X2]);
201 bs[4] = get_bs_b(&h->mv[MV_FWD_B2], &h->mv[MV_FWD_X0]);
202 bs[5] = get_bs_b(&h->mv[MV_FWD_B3], &h->mv[MV_FWD_X1]);
203 break;
204 default:
205 if(mb_type & 1) { //16X8
206 bs[6] = bs[7] = get_bs_b(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X2]);
207 } else { //8X16
208 bs[2] = bs[3] = get_bs_b(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X1]);
209 }
210 bs[0] = get_bs_b(&h->mv[MV_FWD_A1], &h->mv[MV_FWD_X0]);
211 bs[1] = get_bs_b(&h->mv[MV_FWD_A3], &h->mv[MV_FWD_X2]);
212 bs[4] = get_bs_b(&h->mv[MV_FWD_B2], &h->mv[MV_FWD_X0]);
213 bs[5] = get_bs_b(&h->mv[MV_FWD_B3], &h->mv[MV_FWD_X1]);
214 }
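        /* Edges shared with the left or top neighbour are filtered with the
         * average of both macroblocks' QP values; the two internal edges
         * use the current MB's QP only. */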
215 if( *((uint64_t *)bs) ) {
216 if(h->flags & A_AVAIL) {
217 qp_avg = (h->qp + h->left_qp + 1) >> 1;
218 SET_PARAMS;
219 h->s.dsp.cavs_filter_lv(h->cy,h->l_stride,alpha,beta,tc,bs[0],bs[1]);
220 h->s.dsp.cavs_filter_cv(h->cu,h->c_stride,alpha,beta,tc,bs[0],bs[1]);
221 h->s.dsp.cavs_filter_cv(h->cv,h->c_stride,alpha,beta,tc,bs[0],bs[1]);
222 }
223 qp_avg = h->qp;
224 SET_PARAMS;
225 h->s.dsp.cavs_filter_lv(h->cy + 8,h->l_stride,alpha,beta,tc,bs[2],bs[3]);
226 h->s.dsp.cavs_filter_lh(h->cy + 8*h->l_stride,h->l_stride,alpha,beta,tc,
227 bs[6],bs[7]);
228
229 if(h->flags & B_AVAIL) {
230 qp_avg = (h->qp + h->top_qp[h->mbx] + 1) >> 1;
231 SET_PARAMS;
232 h->s.dsp.cavs_filter_lh(h->cy,h->l_stride,alpha,beta,tc,bs[4],bs[5]);
233 h->s.dsp.cavs_filter_ch(h->cu,h->c_stride,alpha,beta,tc,bs[4],bs[5]);
234 h->s.dsp.cavs_filter_ch(h->cv,h->c_stride,alpha,beta,tc,bs[4],bs[5]);
235 }
236 }
237 }
238 h->left_qp = h->qp;
239 h->top_qp[h->mbx] = h->qp;
240 }
241
242 #undef SET_PARAMS
243
244 /*****************************************************************************
245 *
246 * spatial intra prediction
247 *
248 ****************************************************************************/
249
250 static inline void load_intra_pred_luma(AVSContext *h, uint8_t *top,
251 uint8_t *left, int block) {
252 int i;
253
254 switch(block) {
255 case 0:
256 memcpy(&left[1],h->left_border_y,16);
257 left[0] = left[1];
258 left[17] = left[16];
259 memcpy(&top[1],&h->top_border_y[h->mbx*16],16);
260 top[17] = top[16];
261 top[0] = top[1];
262 if((h->flags & A_AVAIL) && (h->flags & B_AVAIL))
263 left[0] = top[0] = h->topleft_border_y;
264 break;
265 case 1:
266 for(i=0;i<8;i++)
267 left[i+1] = *(h->cy + 7 + i*h->l_stride);
268 memset(&left[9],left[8],9);
269 left[0] = left[1];
270 memcpy(&top[1],&h->top_border_y[h->mbx*16+8],8);
271 if(h->flags & C_AVAIL)
272 memcpy(&top[9],&h->top_border_y[(h->mbx + 1)*16],8);
273 else
274 memset(&top[9],top[8],9);
275 top[17] = top[16];
276 top[0] = top[1];
277 if(h->flags & B_AVAIL)
278 left[0] = top[0] = h->top_border_y[h->mbx*16+7];
279 break;
280 case 2:
281 memcpy(&left[1],&h->left_border_y[8],8);
282 memset(&left[9],left[8],9);
283 memcpy(&top[1],h->cy + 7*h->l_stride,16);
284 top[17] = top[16];
285 left[0] = h->left_border_y[7];
286 top[0] = top[1];
287 if(h->flags & A_AVAIL)
288 top[0] = left[0];
289 break;
290 case 3:
291 for(i=0;i<9;i++)
292 left[i] = *(h->cy + 7 + (i+7)*h->l_stride);
293 memset(&left[9],left[8],9);
294 memcpy(&top[0],h->cy + 7 + 7*h->l_stride,9);
295 memset(&top[9],top[8],9);
296 break;
297 }
298 }
299
300 static void intra_pred_vert(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
301 int y;
302 uint64_t a = *((uint64_t *)(&top[1]));
303 for(y=0;y<8;y++) {
304 *((uint64_t *)(d+y*stride)) = a;
305 }
306 }
307
308 static void intra_pred_horiz(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
309 int y;
310 uint64_t a;
311 for(y=0;y<8;y++) {
312 a = left[y+1] * 0x0101010101010101ULL;
313 *((uint64_t *)(d+y*stride)) = a;
314 }
315 }
316
317 static void intra_pred_dc_128(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
318 int y;
319 uint64_t a = 0x8080808080808080ULL;
320 for(y=0;y<8;y++)
321 *((uint64_t *)(d+y*stride)) = a;
322 }
323
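/* Plane (gradient) prediction used for chroma: ih and iv estimate the
 * horizontal and vertical gradients from the border samples, ia anchors the
 * plane at the outer corner, and each output sample is clipped via the
 * crop table. */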
324 static void intra_pred_plane(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
325 int x,y,ia;
326 int ih = 0;
327 int iv = 0;
328 uint8_t *cm = cropTbl + MAX_NEG_CROP;
329
330 for(x=0; x<4; x++) {
331 ih += (x+1)*(top[5+x]-top[3-x]);
332 iv += (x+1)*(left[5+x]-left[3-x]);
333 }
334 ia = (top[8]+left[8])<<4;
335 ih = (17*ih+16)>>5;
336 iv = (17*iv+16)>>5;
337 for(y=0; y<8; y++)
338 for(x=0; x<8; x++)
339 d[y*stride+x] = cm[(ia+(x-3)*ih+(y-3)*iv+16)>>5];
340 }
341
342 #define LOWPASS(ARRAY,INDEX) \
343 (( ARRAY[(INDEX)-1] + 2*ARRAY[(INDEX)] + ARRAY[(INDEX)+1] + 2) >> 2)
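/* LOWPASS is the (1,2,1)/4 smoothing filter applied to the reference
 * samples by the low-pass and diagonal prediction modes below. */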
344
345 static void intra_pred_lp(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
346 int x,y;
347 for(y=0; y<8; y++)
348 for(x=0; x<8; x++)
349 d[y*stride+x] = (LOWPASS(top,x+1) + LOWPASS(left,y+1)) >> 1;
350 }
351
352 static void intra_pred_down_left(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
353 int x,y;
354 for(y=0; y<8; y++)
355 for(x=0; x<8; x++)
356 d[y*stride+x] = (LOWPASS(top,x+y+2) + LOWPASS(left,x+y+2)) >> 1;
357 }
358
359 static void intra_pred_down_right(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
360 int x,y;
361 for(y=0; y<8; y++)
362 for(x=0; x<8; x++)
363 if(x==y)
364 d[y*stride+x] = (left[1]+2*top[0]+top[1]+2)>>2;
365 else if(x>y)
366 d[y*stride+x] = LOWPASS(top,x-y);
367 else
368 d[y*stride+x] = LOWPASS(left,y-x);
369 }
370
371 static void intra_pred_lp_left(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
372 int x,y;
373 for(y=0; y<8; y++)
374 for(x=0; x<8; x++)
375 d[y*stride+x] = LOWPASS(left,y+1);
376 }
377
378 static void intra_pred_lp_top(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
379 int x,y;
380 for(y=0; y<8; y++)
381 for(x=0; x<8; x++)
382 d[y*stride+x] = LOWPASS(top,x+1);
383 }
384
385 #undef LOWPASS
386
387 static inline void modify_pred(const int_fast8_t *mod_table, int *mode) {
388 *mode = mod_table[*mode];
389 if(*mode < 0) {
390 av_log(NULL, AV_LOG_ERROR, "Illegal intra prediction mode\n");
391 *mode = 0;
392 }
393 }
394
395 /*****************************************************************************
396 *
397 * motion compensation
398 *
399 ****************************************************************************/
400
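/* Motion compensation for one prediction direction: luma vectors are in
 * quarter-pel units (mx&3/my&3 select the qpel interpolation function) and
 * chroma reuses the same vector at eighth-pel resolution (mx&7/my&7).  When
 * the referenced block reaches outside the picture it is first padded into
 * edge_emu_buffer by ff_emulated_edge_mc(). */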
401 static inline void mc_dir_part(AVSContext *h,Picture *pic,int square,
402 int chroma_height,int delta,int list,uint8_t *dest_y,
403 uint8_t *dest_cb,uint8_t *dest_cr,int src_x_offset,
404 int src_y_offset,qpel_mc_func *qpix_op,
405 h264_chroma_mc_func chroma_op,vector_t *mv){
406 MpegEncContext * const s = &h->s;
407 const int mx= mv->x + src_x_offset*8;
408 const int my= mv->y + src_y_offset*8;
409 const int luma_xy= (mx&3) + ((my&3)<<2);
410 uint8_t * src_y = pic->data[0] + (mx>>2) + (my>>2)*h->l_stride;
411 uint8_t * src_cb= pic->data[1] + (mx>>3) + (my>>3)*h->c_stride;
412 uint8_t * src_cr= pic->data[2] + (mx>>3) + (my>>3)*h->c_stride;
413 int extra_width= 0; //(s->flags&CODEC_FLAG_EMU_EDGE) ? 0 : 16;
414 int extra_height= extra_width;
415 int emu=0;
416 const int full_mx= mx>>2;
417 const int full_my= my>>2;
418 const int pic_width = 16*h->mb_width;
419 const int pic_height = 16*h->mb_height;
420
421 if(!pic->data[0])
422 return;
423 if(mx&7) extra_width -= 3;
424 if(my&7) extra_height -= 3;
425
426 if( full_mx < 0-extra_width
427 || full_my < 0-extra_height
428 || full_mx + 16/*FIXME*/ > pic_width + extra_width
429 || full_my + 16/*FIXME*/ > pic_height + extra_height){
430 ff_emulated_edge_mc(s->edge_emu_buffer, src_y - 2 - 2*h->l_stride, h->l_stride,
431 16+5, 16+5/*FIXME*/, full_mx-2, full_my-2, pic_width, pic_height);
432 src_y= s->edge_emu_buffer + 2 + 2*h->l_stride;
433 emu=1;
434 }
435
436 qpix_op[luma_xy](dest_y, src_y, h->l_stride); //FIXME try variable height perhaps?
437 if(!square){
438 qpix_op[luma_xy](dest_y + delta, src_y + delta, h->l_stride);
439 }
440
441 if(emu){
442 ff_emulated_edge_mc(s->edge_emu_buffer, src_cb, h->c_stride,
443 9, 9/*FIXME*/, (mx>>3), (my>>3), pic_width>>1, pic_height>>1);
444 src_cb= s->edge_emu_buffer;
445 }
446 chroma_op(dest_cb, src_cb, h->c_stride, chroma_height, mx&7, my&7);
447
448 if(emu){
449 ff_emulated_edge_mc(s->edge_emu_buffer, src_cr, h->c_stride,
450 9, 9/*FIXME*/, (mx>>3), (my>>3), pic_width>>1, pic_height>>1);
451 src_cr= s->edge_emu_buffer;
452 }
453 chroma_op(dest_cr, src_cr, h->c_stride, chroma_height, mx&7, my&7);
454 }
455
456 static inline void mc_part_std(AVSContext *h,int square,int chroma_height,int delta,
457 uint8_t *dest_y,uint8_t *dest_cb,uint8_t *dest_cr,
458 int x_offset, int y_offset,qpel_mc_func *qpix_put,
459 h264_chroma_mc_func chroma_put,qpel_mc_func *qpix_avg,
460 h264_chroma_mc_func chroma_avg, vector_t *mv){
461 qpel_mc_func *qpix_op= qpix_put;
462 h264_chroma_mc_func chroma_op= chroma_put;
463
464 dest_y += 2*x_offset + 2*y_offset*h->l_stride;
465 dest_cb += x_offset + y_offset*h->c_stride;
466 dest_cr += x_offset + y_offset*h->c_stride;
467 x_offset += 8*h->mbx;
468 y_offset += 8*h->mby;
469
470 if(mv->ref >= 0){
471 Picture *ref= &h->DPB[mv->ref];
472 mc_dir_part(h, ref, square, chroma_height, delta, 0,
473 dest_y, dest_cb, dest_cr, x_offset, y_offset,
474 qpix_op, chroma_op, mv);
475
476 qpix_op= qpix_avg;
477 chroma_op= chroma_avg;
478 }
479
480 if((mv+MV_BWD_OFFS)->ref >= 0){
481 Picture *ref= &h->DPB[0];
482 mc_dir_part(h, ref, square, chroma_height, delta, 1,
483 dest_y, dest_cb, dest_cr, x_offset, y_offset,
484 qpix_op, chroma_op, mv+MV_BWD_OFFS);
485 }
486 }
487
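/* Inter prediction always operates on the four 8x8 partitions held in the
 * MV cache (X0..X3).  For bi-predicted blocks mc_part_std() writes the
 * forward prediction with the put_* functions first and then blends the
 * backward prediction in with the avg_* functions. */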
488 static void inter_pred(AVSContext *h) {
489     /* always do 8x8 blocks; TODO: are larger blocks worth it? */
490 mc_part_std(h, 1, 4, 0, h->cy, h->cu, h->cv, 0, 0,
491 h->s.dsp.put_cavs_qpel_pixels_tab[1],
492 h->s.dsp.put_h264_chroma_pixels_tab[1],
493 h->s.dsp.avg_cavs_qpel_pixels_tab[1],
494 h->s.dsp.avg_h264_chroma_pixels_tab[1],&h->mv[MV_FWD_X0]);
495 mc_part_std(h, 1, 4, 0, h->cy, h->cu, h->cv, 4, 0,
496 h->s.dsp.put_cavs_qpel_pixels_tab[1],
497 h->s.dsp.put_h264_chroma_pixels_tab[1],
498 h->s.dsp.avg_cavs_qpel_pixels_tab[1],
499 h->s.dsp.avg_h264_chroma_pixels_tab[1],&h->mv[MV_FWD_X1]);
500 mc_part_std(h, 1, 4, 0, h->cy, h->cu, h->cv, 0, 4,
501 h->s.dsp.put_cavs_qpel_pixels_tab[1],
502 h->s.dsp.put_h264_chroma_pixels_tab[1],
503 h->s.dsp.avg_cavs_qpel_pixels_tab[1],
504 h->s.dsp.avg_h264_chroma_pixels_tab[1],&h->mv[MV_FWD_X2]);
505 mc_part_std(h, 1, 4, 0, h->cy, h->cu, h->cv, 4, 4,
506 h->s.dsp.put_cavs_qpel_pixels_tab[1],
507 h->s.dsp.put_h264_chroma_pixels_tab[1],
508 h->s.dsp.avg_cavs_qpel_pixels_tab[1],
509 h->s.dsp.avg_h264_chroma_pixels_tab[1],&h->mv[MV_FWD_X3]);
510 /* set intra prediction modes to default values */
511 h->pred_mode_Y[3] = h->pred_mode_Y[6] = INTRA_L_LP;
512 h->top_pred_Y[h->mbx*2+0] = h->top_pred_Y[h->mbx*2+1] = INTRA_L_LP;
513 }
514
515 /*****************************************************************************
516 *
517 * motion vector prediction
518 *
519 ****************************************************************************/
520
521 static inline void set_mvs(vector_t *mv, enum block_t size) {
522 switch(size) {
523 case BLK_16X16:
524 mv[MV_STRIDE ] = mv[0];
525 mv[MV_STRIDE+1] = mv[0];
526 case BLK_16X8:
527 mv[1] = mv[0];
528 break;
529 case BLK_8X16:
530 mv[MV_STRIDE] = mv[0];
531 break;
532 }
533 }
534
535 static inline void store_mvs(AVSContext *h) {
536 h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + 0] = h->mv[MV_FWD_X0];
537 h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + 1] = h->mv[MV_FWD_X1];
538 h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + 2] = h->mv[MV_FWD_X2];
539 h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + 3] = h->mv[MV_FWD_X3];
540 }
541
542 static inline void scale_mv(AVSContext *h, int *d_x, int *d_y, vector_t *src, int distp) {
543 int den = h->scale_den[src->ref];
544
545 *d_x = (src->x*distp*den + 256 + (src->x>>31)) >> 9;
546 *d_y = (src->y*distp*den + 256 + (src->y>>31)) >> 9;
547 }
548
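/* Median MV prediction: the three candidates are first scaled to the
 * temporal distance of the current reference (scale_mv computes roughly
 * mv * distp / dist via the pre-computed 512/dist factor in scale_den),
 * then the candidate opposite the edge with the median L1 length is taken
 * as the predictor. */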
549 static inline void mv_pred_median(AVSContext *h, vector_t *mvP, vector_t *mvA, vector_t *mvB, vector_t *mvC) {
550 int ax, ay, bx, by, cx, cy;
551 int len_ab, len_bc, len_ca, len_mid;
552
553 /* scale candidates according to their temporal span */
554 scale_mv(h, &ax, &ay, mvA, mvP->dist);
555 scale_mv(h, &bx, &by, mvB, mvP->dist);
556 scale_mv(h, &cx, &cy, mvC, mvP->dist);
557 /* find the geometrical median of the three candidates */
558 len_ab = abs(ax - bx) + abs(ay - by);
559 len_bc = abs(bx - cx) + abs(by - cy);
560 len_ca = abs(cx - ax) + abs(cy - ay);
561 len_mid = mid_pred(len_ab, len_bc, len_ca);
562 if(len_mid == len_ab) {
563 mvP->x = cx;
564 mvP->y = cy;
565 } else if(len_mid == len_bc) {
566 mvP->x = ax;
567 mvP->y = ay;
568 } else {
569 mvP->x = bx;
570 mvP->y = by;
571 }
572 }
573
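/* Direct-mode prediction: forward and backward MVs are derived from the
 * co-located P-frame vector, scaled by the ratio of the B picture's
 * temporal distances to the co-located block's distance using the
 * pre-computed 16384/dist factor; the sign mask m makes the >>14 division
 * round symmetrically for negative components. */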
574 static inline void mv_pred_direct(AVSContext *h, vector_t *pmv_fw,
575 vector_t *col_mv) {
576 vector_t *pmv_bw = pmv_fw + MV_BWD_OFFS;
577 int den = h->direct_den[col_mv->ref];
578 int m = col_mv->x >> 31;
579
580 pmv_fw->dist = h->dist[1];
581 pmv_bw->dist = h->dist[0];
582 pmv_fw->ref = 1;
583 pmv_bw->ref = 0;
584 /* scale the co-located motion vector according to its temporal span */
585 pmv_fw->x = (((den+(den*col_mv->x*pmv_fw->dist^m)-m-1)>>14)^m)-m;
586 pmv_bw->x = m-(((den+(den*col_mv->x*pmv_bw->dist^m)-m-1)>>14)^m);
587 m = col_mv->y >> 31;
588 pmv_fw->y = (((den+(den*col_mv->y*pmv_fw->dist^m)-m-1)>>14)^m)-m;
589 pmv_bw->y = m-(((den+(den*col_mv->y*pmv_bw->dist^m)-m-1)>>14)^m);
590 }
591
592 static inline void mv_pred_sym(AVSContext *h, vector_t *src, enum block_t size) {
593 vector_t *dst = src + MV_BWD_OFFS;
594
595 /* backward mv is the scaled and negated forward mv */
596 dst->x = -((src->x * h->sym_factor + 256) >> 9);
597 dst->y = -((src->y * h->sym_factor + 256) >> 9);
598 dst->ref = 0;
599 dst->dist = h->dist[0];
600 set_mvs(dst, size);
601 }
602
603 static void mv_pred(AVSContext *h, enum mv_loc_t nP, enum mv_loc_t nC,
604 enum mv_pred_t mode, enum block_t size, int ref) {
605 vector_t *mvP = &h->mv[nP];
606 vector_t *mvA = &h->mv[nP-1];
607 vector_t *mvB = &h->mv[nP-4];
608 vector_t *mvC = &h->mv[nC];
609 int mvAref = mvA->ref;
610 int mvBref = mvB->ref;
611 int mvCref;
612
613 mvP->ref = ref;
614 mvP->dist = h->dist[mvP->ref];
615 if(mvC->ref == NOT_AVAIL)
616 mvC = &h->mv[nP-5]; // set to top-left (mvD)
617 mvCref = mvC->ref;
618 if(mode == MV_PRED_PSKIP) {
619 if((mvAref == NOT_AVAIL) || (mvBref == NOT_AVAIL) ||
620 ((mvA->x | mvA->y | mvA->ref) == 0) ||
621 ((mvB->x | mvB->y | mvB->ref) == 0) ) {
622 mvP->x = mvP->y = 0;
623 set_mvs(mvP,size);
624 return;
625 }
626 }
627 /* if there is only one suitable candidate, take it */
628 if((mvAref >= 0) && (mvBref < 0) && (mvCref < 0)) {
629 mvP->x = mvA->x;
630 mvP->y = mvA->y;
631 } else if((mvAref < 0) && (mvBref >= 0) && (mvCref < 0)) {
632 mvP->x = mvB->x;
633 mvP->y = mvB->y;
634 } else if((mvAref < 0) && (mvBref < 0) && (mvCref >= 0)) {
635 mvP->x = mvC->x;
636 mvP->y = mvC->y;
637 } else {
638 switch(mode) {
639 case MV_PRED_LEFT:
640 if(mvAref == mvP->ref) {
641 mvP->x = mvA->x;
642 mvP->y = mvA->y;
643 } else
644 mv_pred_median(h, mvP, mvA, mvB, mvC);
645 break;
646 case MV_PRED_TOP:
647 if(mvBref == mvP->ref) {
648 mvP->x = mvB->x;
649 mvP->y = mvB->y;
650 } else
651 mv_pred_median(h, mvP, mvA, mvB, mvC);
652 break;
653 case MV_PRED_TOPRIGHT:
654 if(mvCref == mvP->ref) {
655 mvP->x = mvC->x;
656 mvP->y = mvC->y;
657 } else
658 mv_pred_median(h, mvP, mvA, mvB, mvC);
659 break;
660 default:
661 mv_pred_median(h, mvP, mvA, mvB, mvC);
662 break;
663 }
664 }
665 if(mode < MV_PRED_PSKIP) {
666 mvP->x += get_se_golomb(&h->s.gb);
667 mvP->y += get_se_golomb(&h->s.gb);
668 }
669 set_mvs(mvP,size);
670 }
671
672 /*****************************************************************************
673 *
674 * residual data decoding
675 *
676 ****************************************************************************/
677
678 /** k-th order exponential Golomb code */
679 static inline int get_ue_code(GetBitContext *gb, int order) {
680 if(order) {
681 int ret = get_ue_golomb(gb) << order;
682 return ret + get_bits(gb,order);
683 }
684 return get_ue_golomb(gb);
685 }
686
687 /**
688 * decode coefficients from one 8x8 block, dequantize, inverse transform
689 * and add them to sample block
690 * @param r pointer to 2D VLC table
691 * @param esc_golomb_order escape codes are k-golomb with this order k
692 * @param qp quantizer
693 * @param dst location of sample block
694 * @param stride line stride in frame buffer
695 */
696 static int decode_residual_block(AVSContext *h, GetBitContext *gb,
697 const residual_vlc_t *r, int esc_golomb_order,
698 int qp, uint8_t *dst, int stride) {
699 int i,pos = -1;
700 int level_code, esc_code, level, run, mask;
701     int level_buf[65]; // one spare entry so a stream without an EOB code cannot overflow
702     int run_buf[65];
703 int dqm = dequant_mul[qp];
704 int dqs = dequant_shift[qp];
705 int dqa = 1 << (dqs - 1);
706 const uint8_t *scantab = ff_zigzag_direct;
707 DCTELEM block[64];
708
709 memset(block,0,64*sizeof(DCTELEM));
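    /* The 2D-VLC tables pair each short code with a run/level entry and a
     * table switch: plain codes come straight from rltab[], while codes
     * >= ESCAPE_CODE carry the run explicitly and read the level as a k-th
     * order exp-Golomb escape; r is advanced to a higher table as the
     * decoded levels grow. */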
710 for(i=0;i<65;i++) {
711 level_code = get_ue_code(gb,r->golomb_order);
712 if(level_code >= ESCAPE_CODE) {
713 run = (level_code - ESCAPE_CODE) >> 1;
714 esc_code = get_ue_code(gb,esc_golomb_order);
715 level = esc_code + (run > r->max_run ? 1 : r->level_add[run]);
716 while(level > r->inc_limit)
717 r++;
718 mask = -(level_code & 1);
719 level = (level^mask) - mask;
720 } else {
721 if(level_code < 0)
722 return -1;
723 level = r->rltab[level_code][0];
724 if(!level) //end of block signal
725 break;
726 run = r->rltab[level_code][1];
727 r += r->rltab[level_code][2];
728 }
729 level_buf[i] = level;
730 run_buf[i] = run;
731 }
732 /* inverse scan and dequantization */
733 while(--i >= 0){
734 pos += 1 + run_buf[i];
735 if(pos > 63) {
736 av_log(h->s.avctx, AV_LOG_ERROR,
737 "position out of block bounds at pic %d MB(%d,%d)\n",
738 h->picture.poc, h->mbx, h->mby);
739 return -1;
740 }
741 block[scantab[pos]] = (level_buf[i]*dqm + dqa) >> dqs;
742 }
743 h->s.dsp.cavs_idct8_add(dst,block,stride);
744 return 0;
745 }
746
747
748 static inline void decode_residual_chroma(AVSContext *h) {
749 if(h->cbp & (1<<4))
750 decode_residual_block(h,&h->s.gb,chroma_2dvlc,0, chroma_qp[h->qp],
751 h->cu,h->c_stride);
752 if(h->cbp & (1<<5))
753 decode_residual_block(h,&h->s.gb,chroma_2dvlc,0, chroma_qp[h->qp],
754 h->cv,h->c_stride);
755 }
756
757 static inline int decode_residual_inter(AVSContext *h) {
758 int block;
759
760 /* get coded block pattern */
761 int cbp= get_ue_golomb(&h->s.gb);
762 if(cbp > 63){
763 av_log(h->s.avctx, AV_LOG_ERROR, "illegal inter cbp\n");
764 return -1;
765 }
766 h->cbp = cbp_tab[cbp][1];
767
768 /* get quantizer */
769 if(h->cbp && !h->qp_fixed)
770 h->qp += get_se_golomb(&h->s.gb);
771 for(block=0;block<4;block++)
772 if(h->cbp & (1<<block))
773 decode_residual_block(h,&h->s.gb,inter_2dvlc,0,h->qp,
774 h->cy + h->luma_scan[block], h->l_stride);
775 decode_residual_chroma(h);
776
777 return 0;
778 }
779
780 /*****************************************************************************
781 *
782 * macroblock level
783 *
784 ****************************************************************************/
785
786 /**
787 * initialise predictors for motion vectors and intra prediction
788 */
789 static inline void init_mb(AVSContext *h) {
790 int i;
791
792 /* copy predictors from top line (MB B and C) into cache */
793 for(i=0;i<3;i++) {
794 h->mv[MV_FWD_B2+i] = h->top_mv[0][h->mbx*2+i];
795 h->mv[MV_BWD_B2+i] = h->top_mv[1][h->mbx*2+i];
796 }
797 h->pred_mode_Y[1] = h->top_pred_Y[h->mbx*2+0];
798 h->pred_mode_Y[2] = h->top_pred_Y[h->mbx*2+1];
799 /* clear top predictors if MB B is not available */
800 if(!(h->flags & B_AVAIL)) {
801 h->mv[MV_FWD_B2] = un_mv;
802 h->mv[MV_FWD_B3] = un_mv;
803 h->mv[MV_BWD_B2] = un_mv;
804 h->mv[MV_BWD_B3] = un_mv;
805 h->pred_mode_Y[1] = h->pred_mode_Y[2] = NOT_AVAIL;
806 h->flags &= ~(C_AVAIL|D_AVAIL);
807 } else if(h->mbx) {
808 h->flags |= D_AVAIL;
809 }
810 if(h->mbx == h->mb_width-1) //MB C not available
811 h->flags &= ~C_AVAIL;
812 /* clear top-right predictors if MB C is not available */
813 if(!(h->flags & C_AVAIL)) {
814 h->mv[MV_FWD_C2] = un_mv;
815 h->mv[MV_BWD_C2] = un_mv;
816 }
817 /* clear top-left predictors if MB D is not available */
818 if(!(h->flags & D_AVAIL)) {
819 h->mv[MV_FWD_D3] = un_mv;
820 h->mv[MV_BWD_D3] = un_mv;
821 }
822 /* set pointer for co-located macroblock type */
823 h->col_type = &h->col_type_base[h->mby*h->mb_width + h->mbx];
824 }
825
826 static inline void check_for_slice(AVSContext *h);
827
828 /**
829 * save predictors for later macroblocks and increase
830 * macroblock address
831 * @returns 0 if end of frame is reached, 1 otherwise
832 */
833 static inline int next_mb(AVSContext *h) {
834 int i;
835
836 h->flags |= A_AVAIL;
837 h->cy += 16;
838 h->cu += 8;
839 h->cv += 8;
840 /* copy mvs as predictors to the left */
841 for(i=0;i<=20;i+=4)
842 h->mv[i] = h->mv[i+2];
843 /* copy bottom mvs from cache to top line */
844 h->top_mv[0][h->mbx*2+0] = h->mv[MV_FWD_X2];
845 h->top_mv[0][h->mbx*2+1] = h->mv[MV_FWD_X3];
846 h->top_mv[1][h->mbx*2+0] = h->mv[MV_BWD_X2];
847 h->top_mv[1][h->mbx*2+1] = h->mv[MV_BWD_X3];
848 /* next MB address */
849 h->mbx++;
850 if(h->mbx == h->mb_width) { //new mb line
851 h->flags = B_AVAIL|C_AVAIL;
852 /* clear left pred_modes */
853 h->pred_mode_Y[3] = h->pred_mode_Y[6] = NOT_AVAIL;
854 /* clear left mv predictors */
855 for(i=0;i<=20;i+=4)
856 h->mv[i] = un_mv;
857 h->mbx = 0;
858 h->mby++;
859 /* re-calculate sample pointers */
860 h->cy = h->picture.data[0] + h->mby*16*h->l_stride;
861 h->cu = h->picture.data[1] + h->mby*8*h->c_stride;
862 h->cv = h->picture.data[2] + h->mby*8*h->c_stride;
863 if(h->mby == h->mb_height) { //frame end
864 return 0;
865 } else {
866 //check_for_slice(h);
867 }
868 }
869 return 1;
870 }
871
872 static int decode_mb_i(AVSContext *h) {
873 GetBitContext *gb = &h->s.gb;
874 int block, pred_mode_uv;
875 uint8_t top[18];
876 uint8_t left[18];
877 uint8_t *d;
878
879 init_mb(h);
880
881 /* get intra prediction modes from stream */
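    /* The predicted mode is the smaller of the left and top neighbours'
     * modes; one flag signals whether it is used directly, otherwise a
     * 2-bit remainder picks one of the remaining modes (values at or above
     * the prediction are shifted up by one). */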
882 for(block=0;block<4;block++) {
883 int nA,nB,predpred;
884 int pos = scan3x3[block];
885
886 nA = h->pred_mode_Y[pos-1];
887 nB = h->pred_mode_Y[pos-3];
888 predpred = FFMIN(nA,nB);
889 if(predpred == NOT_AVAIL) // if either is not available
890 predpred = INTRA_L_LP;
891 if(!get_bits1(gb)){
892 int rem_mode= get_bits(gb, 2);
893 predpred = rem_mode + (rem_mode >= predpred);
894 }
895 h->pred_mode_Y[pos] = predpred;
896 }
897 pred_mode_uv = get_ue_golomb(gb);
898 if(pred_mode_uv > 6) {
899 av_log(h->s.avctx, AV_LOG_ERROR, "illegal intra chroma pred mode\n");
900 pred_mode_uv = 0;
901 }
902
903 /* save pred modes before they get modified */
904 h->pred_mode_Y[3] = h->pred_mode_Y[5];
905 h->pred_mode_Y[6] = h->pred_mode_Y[8];
906 h->top_pred_Y[h->mbx*2+0] = h->pred_mode_Y[7];
907 h->top_pred_Y[h->mbx*2+1] = h->pred_mode_Y[8];
908
909 /* modify pred modes according to availability of neighbour samples */
910 if(!(h->flags & A_AVAIL)) {
911 modify_pred(left_modifier_l, &h->pred_mode_Y[4] );
912 modify_pred(left_modifier_l, &h->pred_mode_Y[7] );
913 modify_pred(left_modifier_c, &pred_mode_uv );
914 }
915 if(!(h->flags & B_AVAIL)) {
916 modify_pred(top_modifier_l, &h->pred_mode_Y[4] );
917 modify_pred(top_modifier_l, &h->pred_mode_Y[5] );
918 modify_pred(top_modifier_c, &pred_mode_uv );
919 }
920
921 /* get coded block pattern */
922 if(h->pic_type == FF_I_TYPE){
923 int cbp= get_ue_golomb(gb);
924 if(cbp > 63){
925 av_log(h->s.avctx, AV_LOG_ERROR, "illegal intra cbp\n");
926 return -1;
927 }
928 h->cbp = cbp_tab[cbp][0];
929 }
930 if(h->cbp && !h->qp_fixed)
931 h->qp += get_se_golomb(gb); //qp_delta
932
933 /* luma intra prediction interleaved with residual decode/transform/add */
934 for(block=0;block<4;block++) {
935 d = h->cy + h->luma_scan[block];
936 load_intra_pred_luma(h, top, left, block);
937 h->intra_pred_l[h->pred_mode_Y[scan3x3[block]]]
938 (d, top, left, h->l_stride);
939 if(h->cbp & (1<<block))
940 decode_residual_block(h,gb,intra_2dvlc,1,h->qp,d,h->l_stride);
941 }
942
943 /* chroma intra prediction */
944 /* extend borders by one pixel */
945 h->left_border_u[9] = h->left_border_u[8];
946 h->left_border_v[9] = h->left_border_v[8];
947 h->top_border_u[h->mbx*10+9] = h->top_border_u[h->mbx*10+8];
948 h->top_border_v[h->mbx*10+9] = h->top_border_v[h->mbx*10+8];
949 if(h->mbx && h->mby) {
950 h->top_border_u[h->mbx*10] = h->left_border_u[0] = h->topleft_border_u;
951 h->top_border_v[h->mbx*10] = h->left_border_v[0] = h->topleft_border_v;
952 } else {
953 h->left_border_u[0] = h->left_border_u[1];
954 h->left_border_v[0] = h->left_border_v[1];
955 h->top_border_u[h->mbx*10] = h->top_border_u[h->mbx*10+1];
956 h->top_border_v[h->mbx*10] = h->top_border_v[h->mbx*10+1];
957 }
958 h->intra_pred_c[pred_mode_uv](h->cu, &h->top_border_u[h->mbx*10],
959 h->left_border_u, h->c_stride);
960 h->intra_pred_c[pred_mode_uv](h->cv, &h->top_border_v[h->mbx*10],
961 h->left_border_v, h->c_stride);
962
963 decode_residual_chroma(h);
964 filter_mb(h,I_8X8);
965
966 /* mark motion vectors as intra */
967 h->mv[MV_FWD_X0] = intra_mv;
968 set_mvs(&h->mv[MV_FWD_X0], BLK_16X16);
969 h->mv[MV_BWD_X0] = intra_mv;
970 set_mvs(&h->mv[MV_BWD_X0], BLK_16X16);
971 if(h->pic_type != FF_B_TYPE)
972 *h->col_type = I_8X8;
973
974 return 0;
975 }
976
977 static void decode_mb_p(AVSContext *h, enum mb_t mb_type) {
978 GetBitContext *gb = &h->s.gb;
979 int ref[4];
980
981 init_mb(h);
982 switch(mb_type) {
983 case P_SKIP:
984 mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_PSKIP, BLK_16X16, 0);
985 break;
986 case P_16X16:
987 ref[0] = h->ref_flag ? 0 : get_bits1(gb);
988 mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_MEDIAN, BLK_16X16,ref[0]);
989 break;
990 case P_16X8:
991 ref[0] = h->ref_flag ? 0 : get_bits1(gb);
992 ref[2] = h->ref_flag ? 0 : get_bits1(gb);
993 mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_TOP, BLK_16X8, ref[0]);
994 mv_pred(h, MV_FWD_X2, MV_FWD_A1, MV_PRED_LEFT, BLK_16X8, ref[2]);
995 break;
996 case P_8X16:
997 ref[0] = h->ref_flag ? 0 : get_bits1(gb);
998 ref[1] = h->ref_flag ? 0 : get_bits1(gb);
999 mv_pred(h, MV_FWD_X0, MV_FWD_B3, MV_PRED_LEFT, BLK_8X16, ref[0]);
1000 mv_pred(h, MV_FWD_X1, MV_FWD_C2, MV_PRED_TOPRIGHT, BLK_8X16, ref[1]);
1001 break;
1002 case P_8X8:
1003 ref[0] = h->ref_flag ? 0 : get_bits1(gb);
1004 ref[1] = h->ref_flag ? 0 : get_bits1(gb);
1005 ref[2] = h->ref_flag ? 0 : get_bits1(gb);
1006 ref[3] = h->ref_flag ? 0 : get_bits1(gb);
1007 mv_pred(h, MV_FWD_X0, MV_FWD_B3, MV_PRED_MEDIAN, BLK_8X8, ref[0]);
1008 mv_pred(h, MV_FWD_X1, MV_FWD_C2, MV_PRED_MEDIAN, BLK_8X8, ref[1]);
1009 mv_pred(h, MV_FWD_X2, MV_FWD_X1, MV_PRED_MEDIAN, BLK_8X8, ref[2]);
1010 mv_pred(h, MV_FWD_X3, MV_FWD_X0, MV_PRED_MEDIAN, BLK_8X8, ref[3]);
1011 }
1012 inter_pred(h);
1013 store_mvs(h);
1014 if(mb_type != P_SKIP)
1015 decode_residual_inter(h);
1016 filter_mb(h,mb_type);
1017 *h->col_type = mb_type;
1018 }
1019
1020 static void decode_mb_b(AVSContext *h, enum mb_t mb_type) {
1021 int block;
1022 enum sub_mb_t sub_type[4];
1023 int flags;
1024
1025 init_mb(h);
1026
1027 /* reset all MVs */
1028 h->mv[MV_FWD_X0] = dir_mv;
1029 set_mvs(&h->mv[MV_FWD_X0], BLK_16X16);
1030 h->mv[MV_BWD_X0] = dir_mv;
1031 set_mvs(&h->mv[MV_BWD_X0], BLK_16X16);
1032 switch(mb_type) {
1033 case B_SKIP:
1034 case B_DIRECT:
1035 if(!(*h->col_type)) {
1036 /* intra MB at co-location, do in-plane prediction */
1037 mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_BSKIP, BLK_16X16, 1);
1038 mv_pred(h, MV_BWD_X0, MV_BWD_C2, MV_PRED_BSKIP, BLK_16X16, 0);
1039 } else
1040 /* direct prediction from co-located P MB, block-wise */
1041 for(block=0;block<4;block++)
1042 mv_pred_direct(h,&h->mv[mv_scan[block]],
1043 &h->col_mv[(h->mby*h->mb_width+h->mbx)*4 + block]);
1044 break;
1045 case B_FWD_16X16:
1046 mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_MEDIAN, BLK_16X16, 1);
1047 break;
1048 case B_SYM_16X16:
1049 mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_MEDIAN, BLK_16X16, 1);
1050 mv_pred_sym(h, &h->mv[MV_FWD_X0], BLK_16X16);
1051 break;
1052 case B_BWD_16X16:
1053 mv_pred(h, MV_BWD_X0, MV_BWD_C2, MV_PRED_MEDIAN, BLK_16X16, 0);
1054 break;
1055 case B_8X8:
1056 for(block=0;block<4;block++)
1057 sub_type[block] = get_bits(&h->s.gb,2);
1058 for(block=0;block<4;block++) {
1059 switch(sub_type[block]) {
1060 case B_SUB_DIRECT:
1061 if(!(*h->col_type)) {
1062 /* intra MB at co-location, do in-plane prediction */
1063 mv_pred(h, mv_scan[block], mv_scan[block]-3,
1064 MV_PRED_BSKIP, BLK_8X8, 1);
1065 mv_pred(h, mv_scan[block]+MV_BWD_OFFS,
1066 mv_scan[block]-3+MV_BWD_OFFS,
1067 MV_PRED_BSKIP, BLK_8X8, 0);
1068 } else
1069 mv_pred_direct(h,&h->mv[mv_scan[block]],
1070 &h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + block]);
1071 break;
1072 case B_SUB_FWD:
1073 mv_pred(h, mv_scan[block], mv_scan[block]-3,
1074 MV_PRED_MEDIAN, BLK_8X8, 1);
1075 break;
1076 case B_SUB_SYM:
1077 mv_pred(h, mv_scan[block], mv_scan[block]-3,
1078 MV_PRED_MEDIAN, BLK_8X8, 1);
1079 mv_pred_sym(h, &h->mv[mv_scan[block]], BLK_8X8);
1080 break;
1081 }
1082 }
1083 for(block=0;block<4;block++) {
1084 if(sub_type[block] == B_SUB_BWD)
1085 mv_pred(h, mv_scan[block]+MV_BWD_OFFS,
1086 mv_scan[block]+MV_BWD_OFFS-3,
1087 MV_PRED_MEDIAN, BLK_8X8, 0);
1088 }
1089 break;
1090 default:
1091 assert((mb_type > B_SYM_16X16) && (mb_type < B_8X8));
1092 flags = b_partition_flags[(mb_type-1)>>1];
1093 if(mb_type & 1) { /* 16x8 macroblock types */
1094 if(flags & FWD0)
1095 mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_TOP, BLK_16X8, 1);
1096 if(flags & SYM0) {
1097 mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_TOP, BLK_16X8, 1);
1098 mv_pred_sym(h, &h->mv[MV_FWD_X0], BLK_16X8);
1099 }
1100 if(flags & FWD1)
1101 mv_pred(h, MV_FWD_X2, MV_FWD_A1, MV_PRED_LEFT, BLK_16X8, 1);
1102 if(flags & SYM1) {
1103 mv_pred(h, MV_FWD_X2, MV_FWD_A1, MV_PRED_LEFT, BLK_16X8, 1);
1104 mv_pred_sym(h, &h->mv[9], BLK_16X8);
1105 }
1106 if(flags & BWD0)
1107 mv_pred(h, MV_BWD_X0, MV_BWD_C2, MV_PRED_TOP, BLK_16X8, 0);
1108 if(flags & BWD1)
1109 mv_pred(h, MV_BWD_X2, MV_BWD_A1, MV_PRED_LEFT, BLK_16X8, 0);
1110 } else { /* 8x16 macroblock types */
1111 if(flags & FWD0)
1112 mv_pred(h, MV_FWD_X0, MV_FWD_B3, MV_PRED_LEFT, BLK_8X16, 1);
1113 if(flags & SYM0) {
1114 mv_pred(h, MV_FWD_X0, MV_FWD_B3, MV_PRED_LEFT, BLK_8X16, 1);
1115 mv_pred_sym(h, &h->mv[MV_FWD_X0], BLK_8X16);
1116 }
1117 if(flags & FWD1)
1118 mv_pred(h, MV_FWD_X1, MV_FWD_C2, MV_PRED_TOPRIGHT,BLK_8X16, 1);
1119 if(flags & SYM1) {
1120 mv_pred(h, MV_FWD_X1, MV_FWD_C2, MV_PRED_TOPRIGHT,BLK_8X16, 1);
1121 mv_pred_sym(h, &h->mv[6], BLK_8X16);
1122 }
1123 if(flags & BWD0)
1124 mv_pred(h, MV_BWD_X0, MV_BWD_B3, MV_PRED_LEFT, BLK_8X16, 0);
1125 if(flags & BWD1)
1126 mv_pred(h, MV_BWD_X1, MV_BWD_C2, MV_PRED_TOPRIGHT,BLK_8X16, 0);
1127 }
1128 }
1129 inter_pred(h);
1130 if(mb_type != B_SKIP)
1131 decode_residual_inter(h);
1132 filter_mb(h,mb_type);
1133 }
1134
1135 /*****************************************************************************
1136 *
1137 * slice level
1138 *
1139 ****************************************************************************/
1140
1141 static inline int decode_slice_header(AVSContext *h, GetBitContext *gb) {
1142 if(h->stc > 0xAF)
1143 av_log(h->s.avctx, AV_LOG_ERROR, "unexpected start code 0x%02x\n", h->stc);
1144 h->mby = h->stc;
1145 if((h->mby == 0) && (!h->qp_fixed)){
1146 h->qp_fixed = get_bits1(gb);
1147 h->qp = get_bits(gb,6);
1148 }
1149 /* inter frame or second slice can have weighting params */
1150 if((h->pic_type != FF_I_TYPE) || (!h->pic_structure && h->mby >= h->mb_width/2))
1151 if(get_bits1(gb)) { //slice_weighting_flag
1152 av_log(h->s.avctx, AV_LOG_ERROR,
1153 "weighted prediction not yet supported\n");
1154 }
1155 return 0;
1156 }
1157
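/* check_for_slice peeks ahead for a byte-aligned 0x000001 start code; if one
 * follows the current macroblock, the code is consumed and the new slice
 * header is decoded before macroblock decoding continues. */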
1158 static inline void check_for_slice(AVSContext *h) {
1159 GetBitContext *gb = &h->s.gb;
1160 int align;
1161 align = (-get_bits_count(gb)) & 7;
1162 if((show_bits_long(gb,24+align) & 0xFFFFFF) == 0x000001) {
1163 get_bits_long(gb,24+align);
1164 h->stc = get_bits(gb,8);
1165 decode_slice_header(h,gb);
1166 }
1167 }
1168
1169 /*****************************************************************************
1170 *
1171 * frame level
1172 *
1173 ****************************************************************************/
1174
1175 static void init_pic(AVSContext *h) {
1176 int i;
1177
1178 /* clear some predictors */
1179 for(i=0;i<=20;i+=4)
1180 h->mv[i] = un_mv;
1181 h->mv[MV_BWD_X0] = dir_mv;
1182 set_mvs(&h->mv[MV_BWD_X0], BLK_16X16);
1183 h->mv[MV_FWD_X0] = dir_mv;
1184 set_mvs(&h->mv[MV_FWD_X0], BLK_16X16);
1185 h->pred_mode_Y[3] = h->pred_mode_Y[6] = NOT_AVAIL;
1186 h->cy = h->picture.data[0];
1187 h->cu = h->picture.data[1];
1188 h->cv = h->picture.data[2];
1189 h->l_stride = h->picture.linesize[0];
1190 h->c_stride = h->picture.linesize[1];
1191 h->luma_scan[2] = 8*h->l_stride;
1192 h->luma_scan[3] = 8*h->l_stride+8;
1193 h->mbx = h->mby = 0;
1194 h->flags = 0;
1195 }
1196
1197 static int decode_pic(AVSContext *h) {
1198 MpegEncContext *s = &h->s;
1199 int skip_count;
1200 enum mb_t mb_type;
1201
1202 if (!s->context_initialized) {
1203 if (MPV_common_init(s) < 0)
1204 return -1;
1205 }
1206     get_bits(&s->gb,16);//bbv_delay
1207 if(h->stc == PIC_PB_START_CODE) {
1208 h->pic_type = get_bits(&s->gb,2) + FF_I_TYPE;
1209 /* make sure we have the reference frames we need */
1210 if(!h->DPB[0].data[0] ||
1211 (!h->DPB[1].data[0] && h->pic_type == FF_B_TYPE))
1212 return -1;
1213 } else {
1214 h->pic_type = FF_I_TYPE;
1215 if(get_bits1(&s->gb))
1216 get_bits(&s->gb,16);//time_code
1217 }
1218 /* release last B frame */
1219 if(h->picture.data[0])
1220 s->avctx->release_buffer(s->avctx, (AVFrame *)&h->picture);
1221
1222 s->avctx->get_buffer(s->avctx, (AVFrame *)&h->picture);
1223 init_pic(h);
1224 h->picture.poc = get_bits(&s->gb,8)*2;
1225
1226 /* get temporal distances and MV scaling factors */
1227 if(h->pic_type != FF_B_TYPE) {
1228 h->dist[0] = (h->picture.poc - h->DPB[0].poc + 512) % 512;
1229 } else {
1230 h->dist[0] = (h->DPB[0].poc - h->picture.poc + 512) % 512;
1231 }
1232 h->dist[1] = (h->picture.poc - h->DPB[1].poc + 512) % 512;
1233 h->scale_den[0] = h->dist[0] ? 512/h->dist[0] : 0;
1234 h->scale_den[1] = h->dist[1] ? 512/h->dist[1] : 0;
1235 if(h->pic_type == FF_B_TYPE) {
1236 h->sym_factor = h->dist[0]*h->scale_den[1];
1237 } else {
1238 h->direct_den[0] = h->dist[0] ? 16384/h->dist[0] : 0;
1239 h->direct_den[1] = h->dist[1] ? 16384/h->dist[1] : 0;
1240 }
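    /* scale_den[] (512/dist) feeds scale_mv(), direct_den[] (16384/dist)
     * feeds mv_pred_direct(), and sym_factor maps the forward MV of
     * symmetric B blocks onto the backward reference. */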
1241
1242 if(s->low_delay)
1243 get_ue_golomb(&s->gb); //bbv_check_times
1244 h->progressive = get_bits1(&s->gb);
1245 if(h->progressive)
1246 h->pic_structure = 1;
1247     else if(!(h->pic_structure = get_bits1(&s->gb)) && (h->stc == PIC_PB_START_CODE))
1248 get_bits1(&s->gb); //advanced_pred_mode_disable
1249 skip_bits1(&s->gb); //top_field_first
1250 skip_bits1(&s->gb); //repeat_first_field
1251 h->qp_fixed = get_bits1(&s->gb);
1252 h->qp = get_bits(&s->gb,6);
1253 if(h->pic_type == FF_I_TYPE) {
1254 if(!h->progressive && !h->pic_structure)
1255 skip_bits1(&s->gb);//what is this?
1256 skip_bits(&s->gb,4); //reserved bits
1257 } else {
1258 if(!(h->pic_type == FF_B_TYPE && h->pic_structure == 1))
1259 h->ref_flag = get_bits1(&s->gb);
1260 skip_bits(&s->gb,4); //reserved bits
1261 h->skip_mode_flag = get_bits1(&s->gb);
1262 }
1263 h->loop_filter_disable = get_bits1(&s->gb);
1264 if(!h->loop_filter_disable && get_bits1(&s->gb)) {
1265 h->alpha_offset = get_se_golomb(&s->gb);
1266 h->beta_offset = get_se_golomb(&s->gb);
1267 } else {
1268 h->alpha_offset = h->beta_offset = 0;
1269 }
1270 check_for_slice(h);
1271 if(h->pic_type == FF_I_TYPE) {
1272 do {
1273 decode_mb_i(h);
1274 } while(next_mb(h));
1275 } else if(h->pic_type == FF_P_TYPE) {
1276 do {
1277 if(h->skip_mode_flag) {
1278 skip_count = get_ue_golomb(&s->gb);
1279 while(skip_count--) {
1280 decode_mb_p(h,P_SKIP);
1281 if(!next_mb(h))
1282 goto done;
1283 }
1284 mb_type = get_ue_golomb(&s->gb) + P_16X16;
1285 } else
1286 mb_type = get_ue_golomb(&s->gb) + P_SKIP;
1287 if(mb_type > P_8X8) {
1288 h->cbp = cbp_tab[mb_type - P_8X8 - 1][0];
1289 decode_mb_i(h);
1290 } else
1291 decode_mb_p(h,mb_type);
1292 } while(next_mb(h));
1293 } else { /* FF_B_TYPE */
1294 do {
1295 if(h->skip_mode_flag) {
1296 skip_count = get_ue_golomb(&s->gb);
1297 while(skip_count--) {
1298 decode_mb_b(h,B_SKIP);
1299 if(!next_mb(h))
1300 goto done;
1301 }
1302 mb_type = get_ue_golomb(&s->gb) + B_DIRECT;
1303 } else
1304 mb_type = get_ue_golomb(&s->gb) + B_SKIP;
1305 init_mb(h);
1306 if(mb_type > B_8X8) {
1307 h->cbp = cbp_tab[mb_type - B_8X8 - 1][0];
1308 decode_mb_i(h);
1309 } else
1310 decode_mb_b(h,mb_type);
1311 } while(next_mb(h));
1312 }
1313 done:
1314 if(h->pic_type != FF_B_TYPE) {
1315 if(h->DPB[1].data[0])
1316 s->avctx->release_buffer(s->avctx, (AVFrame *)&h->DPB[1]);
1317 memcpy(&h->DPB[1], &h->DPB[0], sizeof(Picture));
1318 memcpy(&h->DPB[0], &h->picture, sizeof(Picture));
1319 memset(&h->picture,0,sizeof(Picture));
1320 }
1321 return 0;
1322 }
1323
1324 /*****************************************************************************
1325 *
1326 * headers and interface
1327 *
1328 ****************************************************************************/
1329
1330 /**
1331 * some predictions require data from the top-neighbouring macroblock.
1332 * this data has to be stored for one complete row of macroblocks
1333 * and this storage space is allocated here
1334 */
1335 static void init_top_lines(AVSContext *h) {
1336 /* alloc top line of predictors */
1337 h->top_qp = av_malloc( h->mb_width);
1338 h->top_mv[0] = av_malloc((h->mb_width*2+1)*sizeof(vector_t));
1339 h->top_mv[1] = av_malloc((h->mb_width*2+1)*sizeof(vector_t));
1340 h->top_pred_Y = av_malloc( h->mb_width*2*sizeof(*h->top_pred_Y));
1341 h->top_border_y = av_malloc((h->mb_width+1)*16);
1342 h->top_border_u = av_malloc((h->mb_width)*10);
1343 h->top_border_v = av_malloc((h->mb_width)*10);
1344
1345 /* alloc space for co-located MVs and types */
1346 h->col_mv = av_malloc( h->mb_width*h->mb_height*4*sizeof(vector_t));
1347 h->col_type_base = av_malloc(h->mb_width*h->mb_height);
1348 }
1349
1350 static int decode_seq_header(AVSContext *h) {
1351 MpegEncContext *s = &h->s;
1352 extern const AVRational ff_frame_rate_tab[];
1353 int frame_rate_code;
1354
1355 h->profile = get_bits(&s->gb,8);
1356 h->level = get_bits(&s->gb,8);
1357 skip_bits1(&s->gb); //progressive sequence
1358 s->width = get_bits(&s->gb,14);
1359 s->height = get_bits(&s->gb,14);
1360 skip_bits(&s->gb,2); //chroma format
1361 skip_bits(&s->gb,3); //sample_precision
1362 h->aspect_ratio = get_bits(&s->gb,4);
1363 frame_rate_code = get_bits(&s->gb,4);
1364 skip_bits(&s->gb,18);//bit_rate_lower
1365 skip_bits1(&s->gb); //marker_bit
1366 skip_bits(&s->gb,12);//bit_rate_upper
1367 s->low_delay = get_bits1(&s->gb);
1368 h->mb_width = (s->width + 15) >> 4;
1369 h->mb_height = (s->height + 15) >> 4;
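    /* frame_rate_code uses the same table as MPEG-1/2, hence the shared
     * ff_frame_rate_tab; the rate is inverted into the codec time base. */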
1370 h->s.avctx->time_base.den = ff_frame_rate_tab[frame_rate_code].num;
1371 h->s.avctx->time_base.num = ff_frame_rate_tab[frame_rate_code].den;
1372 h->s.avctx->width = s->width;
1373 h->s.avctx->height = s->height;
1374 if(!h->top_qp)
1375 init_top_lines(h);
1376 return 0;
1377 }
1378
1379 /**
1380 * finds the end of the current frame in the bitstream.
1381 * @return the position of the first byte of the next frame, or -1
1382 */
1383 int ff_cavs_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size) {
1384 int pic_found, i;
1385 uint32_t state;
1386
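    /* pc->state carries the last four bytes across buffer boundaries, so
     * start codes split between two input chunks are still detected; a
     * slice start code continues the current picture, any other start code
     * terminates it. */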
1387 pic_found= pc->frame_start_found;
1388 state= pc->state;
1389
1390 i=0;
1391 if(!pic_found){
1392 for(i=0; i<buf_size; i++){
1393 state= (state<<8) | buf[i];
1394 if(state == PIC_I_START_CODE || state == PIC_PB_START_CODE){
1395 i++;
1396 pic_found=1;
1397 break;
1398 }
1399 }
1400 }
1401
1402 if(pic_found){
1403 /* EOF considered as end of frame */
1404 if (buf_size == 0)
1405 return 0;
1406 for(; i<buf_size; i++){
1407 state= (state<<8) | buf[i];
1408 if((state&0xFFFFFF00) == 0x100){
1409 if(state < SLICE_MIN_START_CODE || state > SLICE_MAX_START_CODE){
1410 pc->frame_start_found=0;
1411 pc->state=-1;
1412 return i-3;
1413 }
1414 }
1415 }
1416 }
1417 pc->frame_start_found= pic_found;
1418 pc->state= state;
1419 return END_NOT_FOUND;
1420 }
1421
1422 void ff_cavs_flush(AVCodecContext * avctx) {
1423 AVSContext *h = avctx->priv_data;
1424 h->got_keyframe = 0;
1425 }
1426
1427 static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size,
1428 uint8_t * buf, int buf_size) {
1429 AVSContext *h = avctx->priv_data;
1430 MpegEncContext *s = &h->s;
1431 int input_size;
1432 const uint8_t *buf_end;
1433 const uint8_t *buf_ptr;
1434 AVFrame *picture = data;
1435 uint32_t stc;
1436
1437 s->avctx = avctx;
1438
1439 if (buf_size == 0) {
1440 if(!s->low_delay && h->DPB[0].data[0]) {
1441 *data_size = sizeof(AVPicture);
1442 *picture = *(AVFrame *) &h->DPB[0];
1443 }
1444 return 0;
1445 }
1446
1447 buf_ptr = buf;
1448 buf_end = buf + buf_size;
1449 for(;;) {
1450 buf_ptr = ff_find_start_code(buf_ptr,buf_end, &stc);
1451 if(stc & 0xFFFFFE00)
1452 return FFMAX(0, buf_ptr - buf - s->parse_context.last_index);
1453 input_size = (buf_end - buf_ptr)*8;
1454 switch(stc) {
1455 case SEQ_START_CODE:
1456 init_get_bits(&s->gb, buf_ptr, input_size);
1457 decode_seq_header(h);
1458 break;
1459 case PIC_I_START_CODE:
1460 if(!h->got_keyframe) {
1461 if(h->DPB[0].data[0])
1462 avctx->release_buffer(avctx, (AVFrame *)&h->DPB[0]);
1463 if(h->DPB[1].data[0])
1464 avctx->release_buffer(avctx, (AVFrame *)&h->DPB[1]);
1465 h->got_keyframe = 1;
1466 }
1467 case PIC_PB_START_CODE:
1468 *data_size = 0;
1469 if(!h->got_keyframe)
1470 break;
1471 init_get_bits(&s->gb, buf_ptr, input_size);
1472 h->stc = stc;
1473 if(decode_pic(h))
1474 break;
1475 *data_size = sizeof(AVPicture);
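            /* reorder for display: I and P pictures are output one frame
             * late (the older reference in DPB[1]) so that B pictures,
             * which are output immediately, appear in the right order. */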
1476 if(h->pic_type != FF_B_TYPE) {
1477 if(h->DPB[1].data[0]) {
1478 *picture = *(AVFrame *) &h->DPB[1];
1479 } else {
1480 *data_size = 0;
1481 }
1482 } else
1483 *picture = *(AVFrame *) &h->picture;
1484 break;
1485 case EXT_START_CODE:
1486 //mpeg_decode_extension(avctx,buf_ptr, input_size);
1487 break;
1488 case USER_START_CODE:
1489 //mpeg_decode_user_data(avctx,buf_ptr, input_size);
1490 break;
1491 default:
1492 if (stc >= SLICE_MIN_START_CODE &&
1493 stc <= SLICE_MAX_START_CODE) {
1494 init_get_bits(&s->gb, buf_ptr, input_size);
1495 decode_slice_header(h, &s->gb);
1496 }
1497 break;
1498 }
1499 }
1500 }
1501
1502 static int cavs_decode_init(AVCodecContext * avctx) {
1503 AVSContext *h = avctx->priv_data;
1504 MpegEncContext * const s = &h->s;
1505
1506 MPV_decode_defaults(s);
1507 s->avctx = avctx;
1508
1509 avctx->pix_fmt= PIX_FMT_YUV420P;
1510
1511 h->luma_scan[0] = 0;
1512 h->luma_scan[1] = 8;
1513 h->intra_pred_l[ INTRA_L_VERT] = intra_pred_vert;
1514 h->intra_pred_l[ INTRA_L_HORIZ] = intra_pred_horiz;
1515 h->intra_pred_l[ INTRA_L_LP] = intra_pred_lp;
1516 h->intra_pred_l[ INTRA_L_DOWN_LEFT] = intra_pred_down_left;
1517 h->intra_pred_l[INTRA_L_DOWN_RIGHT] = intra_pred_down_right;
1518 h->intra_pred_l[ INTRA_L_LP_LEFT] = intra_pred_lp_left;
1519 h->intra_pred_l[ INTRA_L_LP_TOP] = intra_pred_lp_top;
1520 h->intra_pred_l[ INTRA_L_DC_128] = intra_pred_dc_128;
1521 h->intra_pred_c[ INTRA_C_LP] = intra_pred_lp;
1522 h->intra_pred_c[ INTRA_C_HORIZ] = intra_pred_horiz;
1523 h->intra_pred_c[ INTRA_C_VERT] = intra_pred_vert;
1524 h->intra_pred_c[ INTRA_C_PLANE] = intra_pred_plane;
1525 h->intra_pred_c[ INTRA_C_LP_LEFT] = intra_pred_lp_left;
1526 h->intra_pred_c[ INTRA_C_LP_TOP] = intra_pred_lp_top;
1527 h->intra_pred_c[ INTRA_C_DC_128] = intra_pred_dc_128;
1528 h->mv[ 7] = un_mv;
1529 h->mv[19] = un_mv;
1530 return 0;
1531 }
1532
1533 static int cavs_decode_end(AVCodecContext * avctx) {
1534 AVSContext *h = avctx->priv_data;
1535
1536 av_free(h->top_qp);
1537 av_free(h->top_mv[0]);
1538 av_free(h->top_mv[1]);
1539 av_free(h->top_pred_Y);
1540 av_free(h->top_border_y);
1541 av_free(h->top_border_u);
1542 av_free(h->top_border_v);
1543 av_free(h->col_mv);
1544 av_free(h->col_type_base);
1545 return 0;
1546 }
1547
1548 AVCodec cavs_decoder = {
1549 "cavs",
1550 CODEC_TYPE_VIDEO,
1551 CODEC_ID_CAVS,
1552 sizeof(AVSContext),
1553 cavs_decode_init,
1554 NULL,
1555 cavs_decode_end,
1556 cavs_decode_frame,
1557 CODEC_CAP_DR1 | CODEC_CAP_DELAY,
1558 .flush= ff_cavs_flush,
1559 };