/*
 * HEVC video decoder
 *
 * Copyright (C) 2012 - 2013 Guillaume Martres
 * Copyright (C) 2013 Anand Meher Kotra
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "hevc.h"
#include "hevcdec.h"

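/*
 * Order of (L0 candidate, L1 candidate) index pairs used to build combined
 * bi-predictive merge candidates for B slices (see the comb_idx loop in
 * derive_spatial_merge_candidates()).
 */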
static const uint8_t l0_l1_cand_idx[12][2] = {
    { 0, 1, },
    { 1, 0, },
    { 0, 2, },
    { 2, 0, },
    { 1, 2, },
    { 2, 1, },
    { 0, 3, },
    { 3, 0, },
    { 1, 3, },
    { 3, 1, },
    { 2, 3, },
    { 3, 2, },
};

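/*
 * Record which neighbouring blocks (left, above, above-left, above-right,
 * below-left) of the prediction block at (x0, y0) are available, based on
 * the CTB neighbour flags and the tile/slice boundaries.
 */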
void ff_hevc_set_neighbour_available(HEVCContext *s, int x0, int y0,
                                     int nPbW, int nPbH)
{
    HEVCLocalContext *lc = &s->HEVClc;
    int x0b = x0 & ((1 << s->ps.sps->log2_ctb_size) - 1);
    int y0b = y0 & ((1 << s->ps.sps->log2_ctb_size) - 1);

    lc->na.cand_up      = (lc->ctb_up_flag   || y0b);
    lc->na.cand_left    = (lc->ctb_left_flag || x0b);
    lc->na.cand_up_left = (!x0b && !y0b) ? lc->ctb_up_left_flag : lc->na.cand_left && lc->na.cand_up;
    lc->na.cand_up_right_sap =
        ((x0b + nPbW) == (1 << s->ps.sps->log2_ctb_size)) ?
            lc->ctb_up_right_flag && !y0b : lc->na.cand_up;
    lc->na.cand_up_right =
        ((x0b + nPbW) == (1 << s->ps.sps->log2_ctb_size) ?
            lc->ctb_up_right_flag && !y0b : lc->na.cand_up)
        && (x0 + nPbW) < lc->end_of_tiles_x;
    lc->na.cand_bottom_left = ((y0 + nPbH) >= lc->end_of_tiles_y) ? 0 : lc->na.cand_left;
}

/*
 * 6.4.1 Derivation process for z-scan order block availability
 */
static int z_scan_block_avail(HEVCContext *s, int xCurr, int yCurr,
                              int xN, int yN)
{
#define MIN_TB_ADDR_ZS(x, y) \
    s->ps.pps->min_tb_addr_zs[(y) * s->ps.sps->min_tb_width + (x)]
    int Curr = MIN_TB_ADDR_ZS(xCurr >> s->ps.sps->log2_min_tb_size,
                              yCurr >> s->ps.sps->log2_min_tb_size);
    int N;

    if (xN < 0 || yN < 0 ||
        xN >= s->ps.sps->width ||
        yN >= s->ps.sps->height)
        return 0;

    N = MIN_TB_ADDR_ZS(xN >> s->ps.sps->log2_min_tb_size,
                       yN >> s->ps.sps->log2_min_tb_size);

    return N <= Curr;
}

static int same_prediction_block(HEVCLocalContext *lc, int log2_cb_size,
                                 int x0, int y0, int nPbW, int nPbH,
                                 int xA1, int yA1, int partIdx)
{
    return !(nPbW << 1 == 1 << log2_cb_size &&
             nPbH << 1 == 1 << log2_cb_size && partIdx == 1 &&
             lc->cu.x + nPbW > xA1 &&
             lc->cu.y + nPbH <= yA1);
}

/*
 * 6.4.2 Derivation process for prediction block availability
 */
static int check_prediction_block_available(HEVCContext *s, int log2_cb_size,
                                            int x0, int y0, int nPbW, int nPbH,
                                            int xA1, int yA1, int partIdx)
{
    HEVCLocalContext *lc = &s->HEVClc;

    if (lc->cu.x < xA1 && lc->cu.y < yA1 &&
        (lc->cu.x + (1 << log2_cb_size)) > xA1 &&
        (lc->cu.y + (1 << log2_cb_size)) > yA1)
        return same_prediction_block(lc, log2_cb_size, x0, y0,
                                     nPbW, nPbH, xA1, yA1, partIdx);
    else
        return z_scan_block_avail(s, x0, y0, xA1, yA1);
}

// check if the two luma locations belong to the same motion estimation region
static int isDiffMER(HEVCContext *s, int xN, int yN, int xP, int yP)
{
    uint8_t plevel = s->ps.pps->log2_parallel_merge_level;

    return xN >> plevel == xP >> plevel &&
           yN >> plevel == yP >> plevel;
}

#define MATCH_MV(x) (AV_RN32A(&A.x) == AV_RN32A(&B.x))
#define MATCH(x) (A.x == B.x)

// check if the mv's and refidx are the same between A and B
static int compareMVrefidx(struct MvField A, struct MvField B)
{
    if (A.pred_flag[0] && A.pred_flag[1] && B.pred_flag[0] && B.pred_flag[1])
        return MATCH(ref_idx[0]) && MATCH_MV(mv[0]) &&
               MATCH(ref_idx[1]) && MATCH_MV(mv[1]);

    if (A.pred_flag[0] && !A.pred_flag[1] && B.pred_flag[0] && !B.pred_flag[1])
        return MATCH(ref_idx[0]) && MATCH_MV(mv[0]);

    if (!A.pred_flag[0] && A.pred_flag[1] && !B.pred_flag[0] && B.pred_flag[1])
        return MATCH(ref_idx[1]) && MATCH_MV(mv[1]);

    return 0;
}

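/*
 * Scale a motion vector by the ratio of two POC distances (tb: current
 * picture to its reference, td: collocated/neighbouring picture to its
 * reference), using the fixed-point formula of the spec:
 * tx = (16384 + |td| / 2) / td, scale = Clip3(-4096, 4095, (tb * tx + 32) >> 6),
 * followed by sign-aware rounding of the scaled vector.
 */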
static av_always_inline void mv_scale(Mv *dst, Mv *src, int td, int tb)
{
    int tx, scale_factor;

    td = av_clip_int8(td);
    tb = av_clip_int8(tb);
    tx = (0x4000 + abs(td / 2)) / td;
    scale_factor = av_clip((tb * tx + 32) >> 6, -4096, 4095);
    dst->x = av_clip_int16((scale_factor * src->x + 127 +
                            (scale_factor * src->x < 0)) >> 8);
    dst->y = av_clip_int16((scale_factor * src->y + 127 +
                            (scale_factor * src->y < 0)) >> 8);
}

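/*
 * Map a collocated motion vector onto the current reference: fail if one of
 * the two references is long-term and the other is not; copy the vector when
 * the current reference is long-term or the POC distances match; otherwise
 * scale it by the ratio of the POC distances.
 */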
static int check_mvset(Mv *mvLXCol, Mv *mvCol,
                       int colPic, int poc,
                       RefPicList *refPicList, int X, int refIdxLx,
                       RefPicList *refPicList_col, int listCol, int refidxCol)
{
    int cur_lt = refPicList[X].isLongTerm[refIdxLx];
    int col_lt = refPicList_col[listCol].isLongTerm[refidxCol];
    int col_poc_diff, cur_poc_diff;

    if (cur_lt != col_lt) {
        mvLXCol->x = 0;
        mvLXCol->y = 0;
        return 0;
    }

    col_poc_diff = colPic - refPicList_col[listCol].list[refidxCol];
    cur_poc_diff = poc    - refPicList[X].list[refIdxLx];

    if (!col_poc_diff)
        col_poc_diff = 1; // error resilience

    if (cur_lt || col_poc_diff == cur_poc_diff) {
        mvLXCol->x = mvCol->x;
        mvLXCol->y = mvCol->y;
    } else {
        mv_scale(mvLXCol, mvCol, col_poc_diff, cur_poc_diff);
    }
    return 1;
}

#define CHECK_MVSET(l)                            \
    check_mvset(mvLXCol, temp_col.mv + l,         \
                colPic, s->poc,                   \
                refPicList, X, refIdxLx,          \
                refPicList_col, L ## l, temp_col.ref_idx[l])

// derive the motion vectors section 8.5.3.1.8
static int derive_temporal_colocated_mvs(HEVCContext *s, MvField temp_col,
                                         int refIdxLx, Mv *mvLXCol, int X,
                                         int colPic, RefPicList *refPicList_col)
{
    RefPicList *refPicList = s->ref->refPicList;

    if (temp_col.is_intra) {
        mvLXCol->x = 0;
        mvLXCol->y = 0;
        return 0;
    }

    if (temp_col.pred_flag[0] == 0)
        return CHECK_MVSET(1);
    else if (temp_col.pred_flag[0] == 1 && temp_col.pred_flag[1] == 0)
        return CHECK_MVSET(0);
    else if (temp_col.pred_flag[0] == 1 && temp_col.pred_flag[1] == 1) {
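        /*
         * The collocated block is bi-predicted: if no reference picture in
         * either list of the current slice follows the current picture in
         * output order, take the vector from the list matching X; otherwise
         * take it from the list selected by s->sh.collocated_list.
         */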
        int check_diffpicount = 0;
        int i = 0;
        for (i = 0; i < refPicList[0].nb_refs; i++) {
            if (refPicList[0].list[i] > s->poc)
                check_diffpicount++;
        }
        for (i = 0; i < refPicList[1].nb_refs; i++) {
            if (refPicList[1].list[i] > s->poc)
                check_diffpicount++;
        }
        if (check_diffpicount == 0 && X == 0)
            return CHECK_MVSET(0);
        else if (check_diffpicount == 0 && X == 1)
            return CHECK_MVSET(1);
        else {
            if (s->sh.collocated_list == L1)
                return CHECK_MVSET(0);
            else
                return CHECK_MVSET(1);
        }
    }

    return 0;
}

#define TAB_MVF(x, y) \
    tab_mvf[(y) * min_pu_width + x]

#define TAB_MVF_PU(v) \
    TAB_MVF(x ## v ## _pu, y ## v ## _pu)

#define DERIVE_TEMPORAL_COLOCATED_MVS                           \
    derive_temporal_colocated_mvs(s, temp_col,                  \
                                  refIdxLx, mvLXCol, X, colPic, \
                                  ff_hevc_get_ref_list(s, ref, x, y))

/*
 * 8.5.3.1.7 temporal luma motion vector prediction
 */
static int temporal_luma_motion_vector(HEVCContext *s, int x0, int y0,
                                       int nPbW, int nPbH, int refIdxLx,
                                       Mv *mvLXCol, int X)
{
    MvField *tab_mvf;
    MvField temp_col;
    int x, y, x_pu, y_pu;
    int min_pu_width = s->ps.sps->min_pu_width;
    int availableFlagLXCol = 0;
    int colPic;

    HEVCFrame *ref = s->ref->collocated_ref;

    if (!ref) {
        memset(mvLXCol, 0, sizeof(*mvLXCol));
        return 0;
    }

    tab_mvf = ref->tab_mvf;
    colPic  = ref->poc;

    // bottom right collocated motion vector
    x = x0 + nPbW;
    y = y0 + nPbH;

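    /*
     * The collocated motion field is stored on a 16x16 grid, hence the
     * rounding of (x, y) below; ff_thread_await_progress() makes sure the
     * reference frame has been decoded down to that row (frame threading)
     * before its tab_mvf is read.
     */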
    if (tab_mvf &&
        (y0 >> s->ps.sps->log2_ctb_size) == (y >> s->ps.sps->log2_ctb_size) &&
        y < s->ps.sps->height &&
        x < s->ps.sps->width) {
        x &= ~15;
        y &= ~15;
        ff_thread_await_progress(&ref->tf, y, 0);
        x_pu = x >> s->ps.sps->log2_min_pu_size;
        y_pu = y >> s->ps.sps->log2_min_pu_size;
        temp_col = TAB_MVF(x_pu, y_pu);
        availableFlagLXCol = DERIVE_TEMPORAL_COLOCATED_MVS;
    }

    // derive center collocated motion vector
    if (tab_mvf && !availableFlagLXCol) {
        x = x0 + (nPbW >> 1);
        y = y0 + (nPbH >> 1);
        x &= ~15;
        y &= ~15;
        ff_thread_await_progress(&ref->tf, y, 0);
        x_pu = x >> s->ps.sps->log2_min_pu_size;
        y_pu = y >> s->ps.sps->log2_min_pu_size;
        temp_col = TAB_MVF(x_pu, y_pu);
        availableFlagLXCol = DERIVE_TEMPORAL_COLOCATED_MVS;
    }
    return availableFlagLXCol;
}

#define AVAILABLE(cand, v) \
    (cand && !TAB_MVF_PU(v).is_intra)

#define PRED_BLOCK_AVAILABLE(v)                              \
    check_prediction_block_available(s, log2_cb_size,        \
                                     x0, y0, nPbW, nPbH,     \
                                     x ## v, y ## v, part_idx)

#define COMPARE_MV_REFIDX(a, b) \
    compareMVrefidx(TAB_MVF_PU(a), TAB_MVF_PU(b))

/*
 * 8.5.3.1.2 Derivation process for spatial merging candidates
 */
static void derive_spatial_merge_candidates(HEVCContext *s, int x0, int y0,
                                            int nPbW, int nPbH,
                                            int log2_cb_size,
                                            int singleMCLFlag, int part_idx,
                                            int merge_idx,
                                            struct MvField mergecandlist[])
{
    HEVCLocalContext *lc   = &s->HEVClc;
    RefPicList *refPicList = s->ref->refPicList;
    MvField *tab_mvf       = s->ref->tab_mvf;

    const int min_pu_width = s->ps.sps->min_pu_width;

    const int cand_bottom_left = lc->na.cand_bottom_left;
    const int cand_left        = lc->na.cand_left;
    const int cand_up_left     = lc->na.cand_up_left;
    const int cand_up          = lc->na.cand_up;
    const int cand_up_right    = lc->na.cand_up_right_sap;

    const int xA1    = x0 - 1;
    const int yA1    = y0 + nPbH - 1;
    const int xA1_pu = xA1 >> s->ps.sps->log2_min_pu_size;
    const int yA1_pu = yA1 >> s->ps.sps->log2_min_pu_size;

    const int xB1    = x0 + nPbW - 1;
    const int yB1    = y0 - 1;
    const int xB1_pu = xB1 >> s->ps.sps->log2_min_pu_size;
    const int yB1_pu = yB1 >> s->ps.sps->log2_min_pu_size;

    const int xB0    = x0 + nPbW;
    const int yB0    = y0 - 1;
    const int xB0_pu = xB0 >> s->ps.sps->log2_min_pu_size;
    const int yB0_pu = yB0 >> s->ps.sps->log2_min_pu_size;

    const int xA0    = x0 - 1;
    const int yA0    = y0 + nPbH;
    const int xA0_pu = xA0 >> s->ps.sps->log2_min_pu_size;
    const int yA0_pu = yA0 >> s->ps.sps->log2_min_pu_size;

    const int xB2    = x0 - 1;
    const int yB2    = y0 - 1;
    const int xB2_pu = xB2 >> s->ps.sps->log2_min_pu_size;
    const int yB2_pu = yB2 >> s->ps.sps->log2_min_pu_size;

    const int nb_refs = (s->sh.slice_type == HEVC_SLICE_P) ?
                        s->sh.nb_refs[0] : FFMIN(s->sh.nb_refs[0], s->sh.nb_refs[1]);
    int check_MER   = 1;
    int check_MER_1 = 1;

    int zero_idx = 0;

    int nb_merge_cand      = 0;
    int nb_orig_merge_cand = 0;

    int is_available_a0;
    int is_available_a1;
    int is_available_b0;
    int is_available_b1;
    int is_available_b2;
    int check_B0;
    int check_A0;

    // first left spatial merge candidate
    is_available_a1 = AVAILABLE(cand_left, A1);

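    /*
     * The second PB of an Nx2N, nLx2N or nRx2N split must not take A1, which
     * would simply copy the first PB's motion; the analogous rule for
     * 2NxN-style splits is applied to B1 below. Candidates inside the same
     * parallel merge region as the current PB are discarded as well.
     */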
    if (!singleMCLFlag && part_idx == 1 &&
        (lc->cu.part_mode == PART_Nx2N ||
         lc->cu.part_mode == PART_nLx2N ||
         lc->cu.part_mode == PART_nRx2N) ||
        isDiffMER(s, xA1, yA1, x0, y0)) {
        is_available_a1 = 0;
    }

    if (is_available_a1) {
        mergecandlist[0] = TAB_MVF_PU(A1);
        if (merge_idx == 0)
            return;
        nb_merge_cand++;
    }

    // above spatial merge candidate
    is_available_b1 = AVAILABLE(cand_up, B1);

    if (!singleMCLFlag && part_idx == 1 &&
        (lc->cu.part_mode == PART_2NxN ||
         lc->cu.part_mode == PART_2NxnU ||
         lc->cu.part_mode == PART_2NxnD) ||
        isDiffMER(s, xB1, yB1, x0, y0)) {
        is_available_b1 = 0;
    }

    if (is_available_a1 && is_available_b1)
        check_MER = !COMPARE_MV_REFIDX(B1, A1);

    if (is_available_b1 && check_MER)
        mergecandlist[nb_merge_cand++] = TAB_MVF_PU(B1);

    // above right spatial merge candidate
    check_MER = 1;
    check_B0  = PRED_BLOCK_AVAILABLE(B0);

    is_available_b0 = check_B0 && AVAILABLE(cand_up_right, B0);

    if (isDiffMER(s, xB0, yB0, x0, y0))
        is_available_b0 = 0;

    if (is_available_b1 && is_available_b0)
        check_MER = !COMPARE_MV_REFIDX(B0, B1);

    if (is_available_b0 && check_MER) {
        mergecandlist[nb_merge_cand] = TAB_MVF_PU(B0);
        if (merge_idx == nb_merge_cand)
            return;
        nb_merge_cand++;
    }

    // left bottom spatial merge candidate
    check_MER = 1;
    check_A0  = PRED_BLOCK_AVAILABLE(A0);

    is_available_a0 = check_A0 && AVAILABLE(cand_bottom_left, A0);

    if (isDiffMER(s, xA0, yA0, x0, y0))
        is_available_a0 = 0;

    if (is_available_a1 && is_available_a0)
        check_MER = !COMPARE_MV_REFIDX(A0, A1);

    if (is_available_a0 && check_MER) {
        mergecandlist[nb_merge_cand] = TAB_MVF_PU(A0);
        if (merge_idx == nb_merge_cand)
            return;
        nb_merge_cand++;
    }

    // above left spatial merge candidate
    check_MER = 1;

    is_available_b2 = AVAILABLE(cand_up_left, B2);

    if (isDiffMER(s, xB2, yB2, x0, y0))
        is_available_b2 = 0;

    if (is_available_a1 && is_available_b2)
        check_MER = !COMPARE_MV_REFIDX(B2, A1);

    if (is_available_b1 && is_available_b2)
        check_MER_1 = !COMPARE_MV_REFIDX(B2, B1);

    if (is_available_b2 && check_MER && check_MER_1 && nb_merge_cand != 4) {
        mergecandlist[nb_merge_cand] = TAB_MVF_PU(B2);
        if (merge_idx == nb_merge_cand)
            return;
        nb_merge_cand++;
    }

    // temporal motion vector candidate
    if (s->sh.slice_temporal_mvp_enabled_flag &&
        nb_merge_cand < s->sh.max_num_merge_cand) {
        Mv mv_l0_col = { 0 }, mv_l1_col = { 0 };
        int available_l0 = temporal_luma_motion_vector(s, x0, y0, nPbW, nPbH,
                                                       0, &mv_l0_col, 0);
        int available_l1 = (s->sh.slice_type == HEVC_SLICE_B) ?
                           temporal_luma_motion_vector(s, x0, y0, nPbW, nPbH,
                                                       0, &mv_l1_col, 1) : 0;

        if (available_l0 || available_l1) {
            mergecandlist[nb_merge_cand].is_intra     = 0;
            mergecandlist[nb_merge_cand].pred_flag[0] = available_l0;
            mergecandlist[nb_merge_cand].pred_flag[1] = available_l1;
            AV_ZERO16(mergecandlist[nb_merge_cand].ref_idx);
            mergecandlist[nb_merge_cand].mv[0] = mv_l0_col;
            mergecandlist[nb_merge_cand].mv[1] = mv_l1_col;

            if (merge_idx == nb_merge_cand)
                return;
            nb_merge_cand++;
        }
    }

    nb_orig_merge_cand = nb_merge_cand;

    // combined bi-predictive merge candidates (applies for B slices)
    if (s->sh.slice_type == HEVC_SLICE_B && nb_orig_merge_cand > 1 &&
        nb_orig_merge_cand < s->sh.max_num_merge_cand) {
        int comb_idx;

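        /*
         * Pair the L0 motion of one existing candidate with the L1 motion of
         * another, in the order given by l0_l1_cand_idx[], skipping pairs
         * whose two halves refer to the same picture with an identical
         * vector.
         */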
        for (comb_idx = 0; nb_merge_cand < s->sh.max_num_merge_cand &&
                           comb_idx < nb_orig_merge_cand * (nb_orig_merge_cand - 1); comb_idx++) {
            int l0_cand_idx = l0_l1_cand_idx[comb_idx][0];
            int l1_cand_idx = l0_l1_cand_idx[comb_idx][1];
            MvField l0_cand = mergecandlist[l0_cand_idx];
            MvField l1_cand = mergecandlist[l1_cand_idx];

            if (l0_cand.pred_flag[0] && l1_cand.pred_flag[1] &&
                (refPicList[0].list[l0_cand.ref_idx[0]] !=
                 refPicList[1].list[l1_cand.ref_idx[1]] ||
                 AV_RN32A(&l0_cand.mv[0]) != AV_RN32A(&l1_cand.mv[1]))) {
                mergecandlist[nb_merge_cand].ref_idx[0]   = l0_cand.ref_idx[0];
                mergecandlist[nb_merge_cand].ref_idx[1]   = l1_cand.ref_idx[1];
                mergecandlist[nb_merge_cand].pred_flag[0] = 1;
                mergecandlist[nb_merge_cand].pred_flag[1] = 1;
                AV_COPY32(&mergecandlist[nb_merge_cand].mv[0], &l0_cand.mv[0]);
                AV_COPY32(&mergecandlist[nb_merge_cand].mv[1], &l1_cand.mv[1]);
                mergecandlist[nb_merge_cand].is_intra = 0;
                if (merge_idx == nb_merge_cand)
                    return;
                nb_merge_cand++;
            }
        }
    }

    // append Zero motion vector candidates
    while (nb_merge_cand < s->sh.max_num_merge_cand) {
        mergecandlist[nb_merge_cand].pred_flag[0] = 1;
        mergecandlist[nb_merge_cand].pred_flag[1] = s->sh.slice_type == HEVC_SLICE_B;
        AV_ZERO32(mergecandlist[nb_merge_cand].mv + 0);
        AV_ZERO32(mergecandlist[nb_merge_cand].mv + 1);
        mergecandlist[nb_merge_cand].is_intra   = 0;
        mergecandlist[nb_merge_cand].ref_idx[0] = zero_idx < nb_refs ? zero_idx : 0;
        mergecandlist[nb_merge_cand].ref_idx[1] = zero_idx < nb_refs ? zero_idx : 0;

        if (merge_idx == nb_merge_cand)
            return;
        nb_merge_cand++;
        zero_idx++;
    }
}

/*
 * 8.5.3.1.1 Derivation process of luma Mvs for merge mode
 */
void ff_hevc_luma_mv_merge_mode(HEVCContext *s, int x0, int y0, int nPbW,
                                int nPbH, int log2_cb_size, int part_idx,
                                int merge_idx, MvField *mv)
{
    int singleMCLFlag = 0;
    int nCS = 1 << log2_cb_size;
    LOCAL_ALIGNED(4, MvField, mergecand_list, [MRG_MAX_NUM_CANDS]);
    int nPbW2 = nPbW;
    int nPbH2 = nPbH;
    HEVCLocalContext *lc = &s->HEVClc;

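    /*
     * With a parallel merge level above 2, all PBs of an 8x8 CU share a
     * single merge candidate list, derived as if the CU were one 2Nx2N PB.
     */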
    if (s->ps.pps->log2_parallel_merge_level > 2 && nCS == 8) {
        singleMCLFlag = 1;
        x0            = lc->cu.x;
        y0            = lc->cu.y;
        nPbW          = nCS;
        nPbH          = nCS;
        part_idx      = 0;
    }

    ff_hevc_set_neighbour_available(s, x0, y0, nPbW, nPbH);
    derive_spatial_merge_candidates(s, x0, y0, nPbW, nPbH, log2_cb_size,
                                    singleMCLFlag, part_idx,
                                    merge_idx, mergecand_list);

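    /*
     * Bi-prediction is not allowed for 8x4 and 4x8 luma prediction blocks
     * (nPbW2 + nPbH2 == 12): turn such a candidate into an L0-only one.
     */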
    if (mergecand_list[merge_idx].pred_flag[0] == 1 &&
        mergecand_list[merge_idx].pred_flag[1] == 1 &&
        (nPbW2 + nPbH2) == 12) {
        mergecand_list[merge_idx].ref_idx[1]   = -1;
        mergecand_list[merge_idx].pred_flag[1] = 0;
    }

    *mv = mergecand_list[merge_idx];
}

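/*
 * Rescale an AMVP candidate taken from a neighbour whose reference picture
 * differs from the current one, using the same POC-distance scaling as
 * mv_scale().
 */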
static av_always_inline void dist_scale(HEVCContext *s, Mv *mv,
                                        int min_pu_width, int x, int y,
                                        int elist, int ref_idx_curr, int ref_idx)
{
    RefPicList *refPicList = s->ref->refPicList;
    MvField *tab_mvf       = s->ref->tab_mvf;
    int ref_pic_elist      = refPicList[elist].list[TAB_MVF(x, y).ref_idx[elist]];
    int ref_pic_curr       = refPicList[ref_idx_curr].list[ref_idx];

    if (ref_pic_elist != ref_pic_curr) {
        int poc_diff = s->poc - ref_pic_elist;
        if (!poc_diff)
            poc_diff = 1;
        mv_scale(mv, mv, poc_diff, s->poc - ref_pic_curr);
    }
}

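/*
 * AMVP spatial candidate checks: mv_mp_mode_mx() accepts a neighbour only if
 * it references the exact same picture as the current PU (no scaling), while
 * mv_mp_mode_mx_lt() also accepts a different reference of the same
 * long-/short-term kind and scales short-term vectors with dist_scale().
 */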
static int mv_mp_mode_mx(HEVCContext *s, int x, int y, int pred_flag_index,
                         Mv *mv, int ref_idx_curr, int ref_idx)
{
    MvField *tab_mvf = s->ref->tab_mvf;
    int min_pu_width = s->ps.sps->min_pu_width;

    RefPicList *refPicList = s->ref->refPicList;

    if (TAB_MVF(x, y).pred_flag[pred_flag_index] == 1 &&
        refPicList[pred_flag_index].list[TAB_MVF(x, y).ref_idx[pred_flag_index]] == refPicList[ref_idx_curr].list[ref_idx]) {
        *mv = TAB_MVF(x, y).mv[pred_flag_index];
        return 1;
    }
    return 0;
}

static int mv_mp_mode_mx_lt(HEVCContext *s, int x, int y, int pred_flag_index,
                            Mv *mv, int ref_idx_curr, int ref_idx)
{
    MvField *tab_mvf = s->ref->tab_mvf;
    int min_pu_width = s->ps.sps->min_pu_width;

    RefPicList *refPicList = s->ref->refPicList;
    int currIsLongTerm     = refPicList[ref_idx_curr].isLongTerm[ref_idx];

    int colIsLongTerm =
        refPicList[pred_flag_index].isLongTerm[(TAB_MVF(x, y).ref_idx[pred_flag_index])];

    if (TAB_MVF(x, y).pred_flag[pred_flag_index] &&
        colIsLongTerm == currIsLongTerm) {
        *mv = TAB_MVF(x, y).mv[pred_flag_index];
        if (!currIsLongTerm)
            dist_scale(s, mv, min_pu_width, x, y,
                       pred_flag_index, ref_idx_curr, ref_idx);
        return 1;
    }
    return 0;
}

#define MP_MX(v, pred, mx)                               \
    mv_mp_mode_mx(s, x ## v ## _pu, y ## v ## _pu, pred, \
                  &mx, ref_idx_curr, ref_idx)

#define MP_MX_LT(v, pred, mx)                               \
    mv_mp_mode_mx_lt(s, x ## v ## _pu, y ## v ## _pu, pred, \
                     &mx, ref_idx_curr, ref_idx)

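/*
 * Derivation of the luma motion vector predictor (AMVP): candidate A is
 * taken from A0/A1, candidate B from B0/B1/B2, then the temporal candidate
 * and zero vectors fill the list up to two entries, and mvp_lx_flag selects
 * the predictor.
 */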
void ff_hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW,
                              int nPbH, int log2_cb_size, int part_idx,
                              int merge_idx, MvField *mv,
                              int mvp_lx_flag, int LX)
{
    HEVCLocalContext *lc = &s->HEVClc;
    MvField *tab_mvf = s->ref->tab_mvf;
    int isScaledFlag_L0   = 0;
    int availableFlagLXA0 = 0;
    int availableFlagLXB0 = 0;
    int numMVPCandLX      = 0;
    int min_pu_width      = s->ps.sps->min_pu_width;

    int xA0, yA0;
    int xA0_pu, yA0_pu;
    int is_available_a0;

    int xA1, yA1;
    int xA1_pu, yA1_pu;
    int is_available_a1;

    int xB0, yB0;
    int xB0_pu, yB0_pu;
    int is_available_b0;

    int xB1, yB1;
    int xB1_pu = 0, yB1_pu = 0;
    int is_available_b1 = 0;

    int xB2, yB2;
    int xB2_pu = 0, yB2_pu = 0;
    int is_available_b2 = 0;
    Mv mvpcand_list[2] = { { 0 } };
    Mv mxA = { 0 };
    Mv mxB = { 0 };
    int ref_idx_curr = 0;
    int ref_idx = 0;
    int pred_flag_index_l0;
    int pred_flag_index_l1;
    int x0b = x0 & ((1 << s->ps.sps->log2_ctb_size) - 1);
    int y0b = y0 & ((1 << s->ps.sps->log2_ctb_size) - 1);

    int cand_up   = (lc->ctb_up_flag   || y0b);
    int cand_left = (lc->ctb_left_flag || x0b);
    int cand_up_left =
        (!x0b && !y0b) ? lc->ctb_up_left_flag : cand_left && cand_up;
    int cand_up_right =
        (x0b + nPbW == (1 << s->ps.sps->log2_ctb_size) ||
         x0 + nPbW >= lc->end_of_tiles_x) ? lc->ctb_up_right_flag && !y0b
                                          : cand_up;
    int cand_bottom_left = (y0 + nPbH >= lc->end_of_tiles_y) ? 0 : cand_left;

    ref_idx_curr       = LX;
    ref_idx            = mv->ref_idx[LX];
    pred_flag_index_l0 = LX;
    pred_flag_index_l1 = !LX;

    // left bottom spatial candidate
    xA0 = x0 - 1;
    yA0 = y0 + nPbH;
    xA0_pu = xA0 >> s->ps.sps->log2_min_pu_size;
    yA0_pu = yA0 >> s->ps.sps->log2_min_pu_size;

    is_available_a0 = PRED_BLOCK_AVAILABLE(A0) && AVAILABLE(cand_bottom_left, A0);

    // left spatial merge candidate
    xA1 = x0 - 1;
    yA1 = y0 + nPbH - 1;
    xA1_pu = xA1 >> s->ps.sps->log2_min_pu_size;
    yA1_pu = yA1 >> s->ps.sps->log2_min_pu_size;

    is_available_a1 = AVAILABLE(cand_left, A1);
    if (is_available_a0 || is_available_a1)
        isScaledFlag_L0 = 1;

    if (is_available_a0) {
        availableFlagLXA0 = MP_MX(A0, pred_flag_index_l0, mxA);
        if (!availableFlagLXA0)
            availableFlagLXA0 = MP_MX(A0, pred_flag_index_l1, mxA);
    }

    if (is_available_a1 && !availableFlagLXA0) {
        availableFlagLXA0 = MP_MX(A1, pred_flag_index_l0, mxA);
        if (!availableFlagLXA0)
            availableFlagLXA0 = MP_MX(A1, pred_flag_index_l1, mxA);
    }

    if (is_available_a0 && !availableFlagLXA0) {
        availableFlagLXA0 = MP_MX_LT(A0, pred_flag_index_l0, mxA);
        if (!availableFlagLXA0)
            availableFlagLXA0 = MP_MX_LT(A0, pred_flag_index_l1, mxA);
    }

    if (is_available_a1 && !availableFlagLXA0) {
        availableFlagLXA0 = MP_MX_LT(A1, pred_flag_index_l0, mxA);
        if (!availableFlagLXA0)
            availableFlagLXA0 = MP_MX_LT(A1, pred_flag_index_l1, mxA);
    }

    if (availableFlagLXA0 && !mvp_lx_flag) {
        mv->mv[LX] = mxA;
        return;
    }

    // B candidates
    // above right spatial merge candidate
    xB0 = x0 + nPbW;
    yB0 = y0 - 1;
    xB0_pu = xB0 >> s->ps.sps->log2_min_pu_size;
    yB0_pu = yB0 >> s->ps.sps->log2_min_pu_size;

    is_available_b0 = PRED_BLOCK_AVAILABLE(B0) && AVAILABLE(cand_up_right, B0);

    if (is_available_b0) {
        availableFlagLXB0 = MP_MX(B0, pred_flag_index_l0, mxB);
        if (!availableFlagLXB0)
            availableFlagLXB0 = MP_MX(B0, pred_flag_index_l1, mxB);
    }

    if (!availableFlagLXB0) {
        // above spatial merge candidate
        xB1 = x0 + nPbW - 1;
        yB1 = y0 - 1;
        xB1_pu = xB1 >> s->ps.sps->log2_min_pu_size;
        yB1_pu = yB1 >> s->ps.sps->log2_min_pu_size;

        is_available_b1 = AVAILABLE(cand_up, B1);

        if (is_available_b1) {
            availableFlagLXB0 = MP_MX(B1, pred_flag_index_l0, mxB);
            if (!availableFlagLXB0)
                availableFlagLXB0 = MP_MX(B1, pred_flag_index_l1, mxB);
        }
    }

    if (!availableFlagLXB0) {
        // above left spatial merge candidate
        xB2 = x0 - 1;
        yB2 = y0 - 1;
        xB2_pu = xB2 >> s->ps.sps->log2_min_pu_size;
        yB2_pu = yB2 >> s->ps.sps->log2_min_pu_size;
        is_available_b2 = AVAILABLE(cand_up_left, B2);

        if (is_available_b2) {
            availableFlagLXB0 = MP_MX(B2, pred_flag_index_l0, mxB);
            if (!availableFlagLXB0)
                availableFlagLXB0 = MP_MX(B2, pred_flag_index_l1, mxB);
        }
    }

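    /*
     * If neither A0 nor A1 was available, the unscaled B candidate found
     * above is promoted to candidate A, and B is re-derived allowing scaled
     * vectors (MP_MX_LT).
     */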
    if (isScaledFlag_L0 == 0) {
        if (availableFlagLXB0) {
            availableFlagLXA0 = 1;
            mxA = mxB;
        }
        availableFlagLXB0 = 0;

        // XB0 and L1
        if (is_available_b0) {
            availableFlagLXB0 = MP_MX_LT(B0, pred_flag_index_l0, mxB);
            if (!availableFlagLXB0)
                availableFlagLXB0 = MP_MX_LT(B0, pred_flag_index_l1, mxB);
        }

        if (is_available_b1 && !availableFlagLXB0) {
            availableFlagLXB0 = MP_MX_LT(B1, pred_flag_index_l0, mxB);
            if (!availableFlagLXB0)
                availableFlagLXB0 = MP_MX_LT(B1, pred_flag_index_l1, mxB);
        }

        if (is_available_b2 && !availableFlagLXB0) {
            availableFlagLXB0 = MP_MX_LT(B2, pred_flag_index_l0, mxB);
            if (!availableFlagLXB0)
                availableFlagLXB0 = MP_MX_LT(B2, pred_flag_index_l1, mxB);
        }
    }

    if (availableFlagLXA0)
        mvpcand_list[numMVPCandLX++] = mxA;

    if (availableFlagLXB0 && (!availableFlagLXA0 || mxA.x != mxB.x || mxA.y != mxB.y))
        mvpcand_list[numMVPCandLX++] = mxB;

    // temporal motion vector prediction candidate
    if (numMVPCandLX < 2 && s->sh.slice_temporal_mvp_enabled_flag &&
        mvp_lx_flag == numMVPCandLX) {
        Mv mv_col;
        int available_col = temporal_luma_motion_vector(s, x0, y0, nPbW,
                                                        nPbH, ref_idx,
                                                        &mv_col, LX);
        if (available_col)
            mvpcand_list[numMVPCandLX++] = mv_col;
    }

    // insert zero motion vectors when the number of available candidates is less than 2
    while (numMVPCandLX < 2)
        mvpcand_list[numMVPCandLX++] = (Mv){ 0, 0 };

    mv->mv[LX].x = mvpcand_list[mvp_lx_flag].x;
    mv->mv[LX].y = mvpcand_list[mvp_lx_flag].y;
}