/*
 * lavc: Add coded bitstream read/write support for H.265
 * [libav.git] / libavcodec / cbs_h265_syntax_template.c
 */
/*
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

19 static int FUNC(rbsp_trailing_bits)(CodedBitstreamContext *ctx, RWContext *rw)
20 {
21 int err;
22 av_unused int one = 1, zero = 0;
23 xu(1, rbsp_stop_one_bit, one, 1, 1);
24 while (byte_alignment(rw) != 0)
25 xu(1, rbsp_alignment_zero_bit, zero, 0, 0);
26
27 return 0;
28 }
29
30 static int FUNC(nal_unit_header)(CodedBitstreamContext *ctx, RWContext *rw,
31 H265RawNALUnitHeader *current,
32 int expected_nal_unit_type)
33 {
34 int err;
35
36 u(1, forbidden_zero_bit, 0, 0);
37
38 if (expected_nal_unit_type >= 0)
39 u(6, nal_unit_type, expected_nal_unit_type,
40 expected_nal_unit_type);
41 else
42 u(6, nal_unit_type, 0, 63);
43
44 u(6, nuh_layer_id, 0, 62);
45 u(3, nuh_temporal_id_plus1, 1, 7);
46
47 return 0;
48 }
49
50 static int FUNC(byte_alignment)(CodedBitstreamContext *ctx, RWContext *rw)
51 {
52 int err;
53 av_unused int one = 1, zero = 0;
54 xu(1, alignment_bit_equal_to_one, one, 1, 1);
55 while (byte_alignment(rw) != 0)
56 xu(1, alignment_bit_equal_to_zero, zero, 0, 0);
57
58 return 0;
59 }
60
61 static int FUNC(extension_data)(CodedBitstreamContext *ctx, RWContext *rw,
62 H265RawPSExtensionData *current)
63 {
64 int err;
65 size_t k;
66 #ifdef READ
67 BitstreamContext start;
68 uint8_t bit;
69 start = *rw;
70 for (k = 0; cbs_h2645_read_more_rbsp_data(rw); k++);
71 current->bit_length = k;
72 if (k > 0) {
73 *rw = start;
74 allocate(current->data, (current->bit_length + 7) / 8);
75 for (k = 0; k < current->bit_length; k++) {
76 xu(1, extension_data, bit, 0, 1);
77 current->data[k / 8] |= bit << (7 - k % 8);
78 }
79 }
80 #else
81 for (k = 0; k < current->bit_length; k++)
82 xu(1, extension_data, current->data[k / 8] >> (7 - k % 8), 0, 1);
83 #endif
84 return 0;
85 }
86
87 static int FUNC(profile_tier_level)(CodedBitstreamContext *ctx, RWContext *rw,
88 H265RawProfileTierLevel *current,
89 int profile_present_flag,
90 int max_num_sub_layers_minus1)
91 {
92 av_unused unsigned int zero = 0;
93 int err, i, j;
94
95 if (profile_present_flag) {
96 u(2, general_profile_space, 0, 0);
97 flag(general_tier_flag);
98 u(5, general_profile_idc, 0, 31);
99
100 for (j = 0; j < 32; j++)
101 flag(general_profile_compatibility_flag[j]);
102
103 flag(general_progressive_source_flag);
104 flag(general_interlaced_source_flag);
105 flag(general_non_packed_constraint_flag);
106 flag(general_frame_only_constraint_flag);
107
108 #define profile_compatible(x) (current->general_profile_idc == (x) || \
109 current->general_profile_compatibility_flag[x])
110 if (profile_compatible(4) || profile_compatible(5) ||
111 profile_compatible(6) || profile_compatible(7) ||
112 profile_compatible(8) || profile_compatible(9) ||
113 profile_compatible(10)) {
114 flag(general_max_12bit_constraint_flag);
115 flag(general_max_10bit_constraint_flag);
116 flag(general_max_8bit_constraint_flag);
117 flag(general_max_422chroma_constraint_flag);
118 flag(general_max_420chroma_constraint_flag);
119 flag(general_max_monochrome_constraint_flag);
120 flag(general_intra_constraint_flag);
121 flag(general_one_picture_only_constraint_flag);
122 flag(general_lower_bit_rate_constraint_flag);
123
124 if (profile_compatible(5) || profile_compatible(9) ||
125 profile_compatible(10)) {
126 flag(general_max_14bit_constraint_flag);
127 xu(24, general_reserved_zero_33bits, zero, 0, 0);
128 xu(9, general_reserved_zero_33bits, zero, 0, 0);
129 } else {
130 xu(24, general_reserved_zero_34bits, zero, 0, 0);
131 xu(10, general_reserved_zero_34bits, zero, 0, 0);
132 }
133 } else {
134 xu(24, general_reserved_zero_43bits, zero, 0, 0);
135 xu(19, general_reserved_zero_43bits, zero, 0, 0);
136 }
137
138 if (profile_compatible(1) || profile_compatible(2) ||
139 profile_compatible(3) || profile_compatible(4) ||
140 profile_compatible(5) || profile_compatible(9)) {
141 flag(general_inbld_flag);
142 } else {
143 xu(1, general_reserved_zero_bit, zero, 0, 0);
144 }
145 #undef profile_compatible
146 }
147
148 u(8, general_level_idc, 0, 255);
149
150 for (i = 0; i < max_num_sub_layers_minus1; i++) {
151 flag(sub_layer_profile_present_flag[i]);
152 flag(sub_layer_level_present_flag[i]);
153 }
154
155 if (max_num_sub_layers_minus1 > 0) {
156 for (i = max_num_sub_layers_minus1; i < 8; i++) {
157 av_unused int zero = 0;
158 xu(2, reserved_zero_2bits, zero, 0, 0);
159 }
160 }
161
162 for (i = 0; i < max_num_sub_layers_minus1; i++) {
163 if (current->sub_layer_profile_present_flag[i])
164 return AVERROR_PATCHWELCOME;
165 if (current->sub_layer_level_present_flag[i])
166 return AVERROR_PATCHWELCOME;
167 }
168
169 return 0;
170 }
171
172 static int FUNC(sub_layer_hrd_parameters)(CodedBitstreamContext *ctx, RWContext *rw,
173 H265RawHRDParameters *hrd,
174 int nal, int sub_layer_id)
175 {
176 H265RawSubLayerHRDParameters *current;
177 int err, i;
178
179 if (nal)
180 current = &hrd->nal_sub_layer_hrd_parameters[sub_layer_id];
181 else
182 current = &hrd->vcl_sub_layer_hrd_parameters[sub_layer_id];
183
184 for (i = 0; i <= hrd->cpb_cnt_minus1[sub_layer_id]; i++) {
185 ue(bit_rate_value_minus1[i], 0, UINT32_MAX - 1);
186 ue(cpb_size_value_minus1[i], 0, UINT32_MAX - 1);
187 if (hrd->sub_pic_hrd_params_present_flag) {
188 ue(cpb_size_du_value_minus1[i], 0, UINT32_MAX - 1);
189 ue(bit_rate_du_value_minus1[i], 0, UINT32_MAX - 1);
190 }
191 flag(cbr_flag[i]);
192 }
193
194 return 0;
195 }
196
197 static int FUNC(hrd_parameters)(CodedBitstreamContext *ctx, RWContext *rw,
198 H265RawHRDParameters *current, int common_inf_present_flag,
199 int max_num_sub_layers_minus1)
200 {
201 int err, i;
202
203 if (common_inf_present_flag) {
204 flag(nal_hrd_parameters_present_flag);
205 flag(vcl_hrd_parameters_present_flag);
206
207 if (current->nal_hrd_parameters_present_flag ||
208 current->vcl_hrd_parameters_present_flag) {
209 flag(sub_pic_hrd_params_present_flag);
210 if (current->sub_pic_hrd_params_present_flag) {
211 u(8, tick_divisor_minus2, 0, 255);
212 u(5, du_cpb_removal_delay_increment_length_minus1, 0, 31);
213 flag(sub_pic_cpb_params_in_pic_timing_sei_flag);
214 u(5, dpb_output_delay_du_length_minus1, 0, 31);
215 }
216
217 u(4, bit_rate_scale, 0, 15);
218 u(4, cpb_size_scale, 0, 15);
219 if (current->sub_pic_hrd_params_present_flag)
220 u(4, cpb_size_du_scale, 0, 15);
221
222 u(5, initial_cpb_removal_delay_length_minus1, 0, 31);
223 u(5, au_cpb_removal_delay_length_minus1, 0, 31);
224 u(5, dpb_output_delay_length_minus1, 0, 31);
225 } else {
226 infer(sub_pic_hrd_params_present_flag, 0);
227
228 infer(initial_cpb_removal_delay_length_minus1, 23);
229 infer(au_cpb_removal_delay_length_minus1, 23);
230 infer(dpb_output_delay_length_minus1, 23);
231 }
232 }
233
234 for (i = 0; i <= max_num_sub_layers_minus1; i++) {
235 flag(fixed_pic_rate_general_flag[i]);
236
237 if (!current->fixed_pic_rate_general_flag[i])
238 flag(fixed_pic_rate_within_cvs_flag[i]);
239 else
240 infer(fixed_pic_rate_within_cvs_flag[i], 1);
241
242 if (current->fixed_pic_rate_within_cvs_flag[i]) {
243 ue(elemental_duration_in_tc_minus1[i], 0, 2047);
244 infer(low_delay_hrd_flag[i], 0);
245 } else
246 flag(low_delay_hrd_flag[i]);
247
248 if (!current->low_delay_hrd_flag[i])
249 ue(cpb_cnt_minus1[i], 0, 31);
250 else
251 infer(cpb_cnt_minus1[i], 0);
252
253 if (current->nal_hrd_parameters_present_flag)
254 CHECK(FUNC(sub_layer_hrd_parameters)(ctx, rw, current, 0, i));
255 if (current->vcl_hrd_parameters_present_flag)
256 CHECK(FUNC(sub_layer_hrd_parameters)(ctx, rw, current, 1, i));
257 }
258
259 return 0;
260 }
261
262 static int FUNC(vui_parameters)(CodedBitstreamContext *ctx, RWContext *rw,
263 H265RawVUI *current, const H265RawSPS *sps)
264 {
265 int err;
266
267 flag(aspect_ratio_info_present_flag);
268 if (current->aspect_ratio_info_present_flag) {
269 u(8, aspect_ratio_idc, 0, 255);
270 if (current->aspect_ratio_idc == 255) {
271 u(16, sar_width, 0, 65535);
272 u(16, sar_height, 0, 65535);
273 }
274 } else {
275 infer(aspect_ratio_idc, 0);
276 }
277
278 flag(overscan_info_present_flag);
279 if (current->overscan_info_present_flag)
280 flag(overscan_appropriate_flag);
281
282 flag(video_signal_type_present_flag);
283 if (current->video_signal_type_present_flag) {
284 u(3, video_format, 0, 7);
285 flag(video_full_range_flag);
286 flag(colour_description_present_flag);
287 if (current->colour_description_present_flag) {
288 u(8, colour_primaries, 0, 255);
289 u(8, transfer_characteristics, 0, 255);
290 u(8, matrix_coefficients, 0, 255);
291 } else {
292 infer(colour_primaries, 2);
293 infer(transfer_characteristics, 2);
294 infer(matrix_coefficients, 2);
295 }
296 } else {
297 infer(video_format, 5);
298 infer(video_full_range_flag, 0);
299 infer(colour_primaries, 2);
300 infer(transfer_characteristics, 2);
301 infer(matrix_coefficients, 2);
302 }
303
304 flag(chroma_loc_info_present_flag);
305 if (current->chroma_loc_info_present_flag) {
306 ue(chroma_sample_loc_type_top_field, 0, 5);
307 ue(chroma_sample_loc_type_bottom_field, 0, 5);
308 } else {
309 infer(chroma_sample_loc_type_top_field, 0);
310 infer(chroma_sample_loc_type_bottom_field, 0);
311 }
312
313 flag(neutral_chroma_indication_flag);
314 flag(field_seq_flag);
315 flag(frame_field_info_present_flag);
316
317 flag(default_display_window_flag);
318 if (current->default_display_window_flag) {
319 ue(def_disp_win_left_offset, 0, 16384);
320 ue(def_disp_win_right_offset, 0, 16384);
321 ue(def_disp_win_top_offset, 0, 16384);
322 ue(def_disp_win_bottom_offset, 0, 16384);
323 }
324
325 flag(vui_timing_info_present_flag);
326 if (current->vui_timing_info_present_flag) {
327 u(32, vui_num_units_in_tick, 1, UINT32_MAX);
328 u(32, vui_time_scale, 1, UINT32_MAX);
329 flag(vui_poc_proportional_to_timing_flag);
330 if (current->vui_poc_proportional_to_timing_flag)
331 ue(vui_num_ticks_poc_diff_one_minus1, 0, UINT32_MAX - 1);
332
333 flag(vui_hrd_parameters_present_flag);
334 if (current->vui_hrd_parameters_present_flag) {
335 CHECK(FUNC(hrd_parameters)(ctx, rw, &current->hrd_parameters,
336 1, sps->sps_max_sub_layers_minus1));
337 }
338 }
339
340 flag(bitstream_restriction_flag);
341 if (current->bitstream_restriction_flag) {
342 flag(tiles_fixed_structure_flag);
343 flag(motion_vectors_over_pic_boundaries_flag);
344 flag(restricted_ref_pic_lists_flag);
345 ue(min_spatial_segmentation_idc, 0, 4095);
346 ue(max_bytes_per_pic_denom, 0, 16);
347 ue(max_bits_per_min_cu_denom, 0, 16);
348 ue(log2_max_mv_length_horizontal, 0, 16);
349 ue(log2_max_mv_length_vertical, 0, 16);
350 } else {
351 infer(tiles_fixed_structure_flag, 0);
352 infer(motion_vectors_over_pic_boundaries_flag, 1);
353 infer(min_spatial_segmentation_idc, 0);
354 infer(max_bytes_per_pic_denom, 2);
355 infer(max_bits_per_min_cu_denom, 1);
356 infer(log2_max_mv_length_horizontal, 15);
357 infer(log2_max_mv_length_vertical, 15);
358 }
359
360 return 0;
361 }
362
363 static int FUNC(vps)(CodedBitstreamContext *ctx, RWContext *rw,
364 H265RawVPS *current)
365 {
366 int err, i, j;
367
368 HEADER("Video Parameter Set");
369
370 CHECK(FUNC(nal_unit_header)(ctx, rw, &current->nal_unit_header, HEVC_NAL_VPS));
371
372 u(4, vps_video_parameter_set_id, 0, 15);
373
374 flag(vps_base_layer_internal_flag);
375 flag(vps_base_layer_available_flag);
376 u(6, vps_max_layers_minus1, 0, HEVC_MAX_LAYERS - 1);
377 u(3, vps_max_sub_layers_minus1, 0, HEVC_MAX_SUB_LAYERS - 1);
378 flag(vps_temporal_id_nesting_flag);
379
380 if (current->vps_max_sub_layers_minus1 == 0 &&
381 current->vps_temporal_id_nesting_flag != 1) {
382 av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid stream: "
383 "vps_temporal_id_nesting_flag must be 1 if "
384 "vps_max_sub_layers_minus1 is 0.\n");
385 return AVERROR_INVALIDDATA;
386 }
387
388 {
389 av_unused uint16_t ffff = 0xffff;
390 xu(16, vps_reserved_0xffff_16bits, ffff, 0xffff, 0xffff);
391 }
392
393 CHECK(FUNC(profile_tier_level)(ctx, rw, &current->profile_tier_level,
394 1, current->vps_max_sub_layers_minus1));
395
396 flag(vps_sub_layer_ordering_info_present_flag);
397 for (i = (current->vps_sub_layer_ordering_info_present_flag ?
398 0 : current->vps_max_sub_layers_minus1);
399 i <= current->vps_max_sub_layers_minus1; i++) {
400 ue(vps_max_dec_pic_buffering_minus1[i], 0, HEVC_MAX_DPB_SIZE - 1);
401 ue(vps_max_num_reorder_pics[i], 0, current->vps_max_dec_pic_buffering_minus1[i]);
402 ue(vps_max_latency_increase_plus1[i], 0, UINT32_MAX - 1);
403 }
404 if (!current->vps_sub_layer_ordering_info_present_flag) {
405 for (i = 0; i < current->vps_max_sub_layers_minus1; i++) {
406 infer(vps_max_dec_pic_buffering_minus1[i],
407 current->vps_max_dec_pic_buffering_minus1[current->vps_max_sub_layers_minus1]);
408 infer(vps_max_num_reorder_pics[i],
409 current->vps_max_num_reorder_pics[current->vps_max_sub_layers_minus1]);
410 infer(vps_max_latency_increase_plus1[i],
411 current->vps_max_latency_increase_plus1[current->vps_max_sub_layers_minus1]);
412 }
413 }
414
415 u(6, vps_max_layer_id, 0, HEVC_MAX_LAYERS - 1);
416 ue(vps_num_layer_sets_minus1, 0, HEVC_MAX_LAYER_SETS - 1);
417 for (i = 1; i <= current->vps_num_layer_sets_minus1; i++) {
418 for (j = 0; j <= current->vps_max_layer_id; j++)
419 flag(layer_id_included_flag[i][j]);
420 }
421 for (j = 0; j <= current->vps_max_layer_id; j++)
422 infer(layer_id_included_flag[0][j], j == 0);
423
424 flag(vps_timing_info_present_flag);
425 if (current->vps_timing_info_present_flag) {
426 u(32, vps_num_units_in_tick, 1, UINT32_MAX);
427 u(32, vps_time_scale, 1, UINT32_MAX);
428 flag(vps_poc_proportional_to_timing_flag);
429 if (current->vps_poc_proportional_to_timing_flag)
430 ue(vps_num_ticks_poc_diff_one_minus1, 0, UINT32_MAX - 1);
431 ue(vps_num_hrd_parameters, 0, current->vps_num_layer_sets_minus1 + 1);
432 for (i = 0; i < current->vps_num_hrd_parameters; i++) {
433 ue(hrd_layer_set_idx[i],
434 current->vps_base_layer_internal_flag ? 0 : 1,
435 current->vps_num_layer_sets_minus1);
436 if (i > 0)
437 flag(cprms_present_flag[i]);
438 else
439 infer(cprms_present_flag[0], 1);
440
441 CHECK(FUNC(hrd_parameters)(ctx, rw, &current->hrd_parameters[i],
442 current->cprms_present_flag[i],
443 current->vps_max_sub_layers_minus1));
444 }
445 }
446
447 flag(vps_extension_flag);
448 if (current->vps_extension_flag)
449 CHECK(FUNC(extension_data)(ctx, rw, &current->extension_data));
450
451 CHECK(FUNC(rbsp_trailing_bits)(ctx, rw));
452
453 return 0;
454 }
455
456 static int FUNC(st_ref_pic_set)(CodedBitstreamContext *ctx, RWContext *rw,
457 H265RawSTRefPicSet *current, int st_rps_idx,
458 const H265RawSPS *sps)
459 {
460 int err, i, j;
461
462 if (st_rps_idx != 0)
463 flag(inter_ref_pic_set_prediction_flag);
464 else
465 infer(inter_ref_pic_set_prediction_flag, 0);
466
467 if (current->inter_ref_pic_set_prediction_flag) {
468 unsigned int ref_rps_idx, num_delta_pocs;
469 const H265RawSTRefPicSet *ref;
470 int delta_rps, d_poc;
471 int ref_delta_poc_s0[HEVC_MAX_REFS], ref_delta_poc_s1[HEVC_MAX_REFS];
472 int delta_poc_s0[HEVC_MAX_REFS], delta_poc_s1[HEVC_MAX_REFS];
473 uint8_t used_by_curr_pic_s0[HEVC_MAX_REFS],
474 used_by_curr_pic_s1[HEVC_MAX_REFS];
475
476 if (st_rps_idx == sps->num_short_term_ref_pic_sets)
477 ue(delta_idx_minus1, 0, st_rps_idx - 1);
478 else
479 infer(delta_idx_minus1, 0);
480
481 ref_rps_idx = st_rps_idx - (current->delta_idx_minus1 + 1);
482 ref = &sps->st_ref_pic_set[ref_rps_idx];
483 num_delta_pocs = ref->num_negative_pics + ref->num_positive_pics;
484
485 flag(delta_rps_sign);
486 ue(abs_delta_rps_minus1, 0, INT16_MAX);
487 delta_rps = (1 - 2 * current->delta_rps_sign) *
488 (current->abs_delta_rps_minus1 + 1);
489
490 for (j = 0; j <= num_delta_pocs; j++) {
491 flag(used_by_curr_pic_flag[j]);
492 if (!current->used_by_curr_pic_flag[j])
493 flag(use_delta_flag[j]);
494 else
495 infer(use_delta_flag[j], 1);
496 }
497
498 // Since the stored form of an RPS here is actually the delta-step
499 // form used when inter_ref_pic_set_prediction_flag is not set, we
500 // need to reconstruct that here in order to be able to refer to
501 // the RPS later (which is required for parsing, because we don't
502 // even know what syntax elements appear without it). Therefore,
503 // this code takes the delta-step form of the reference set, turns
504 // it into the delta-array form, applies the prediction process of
505 // 7.4.8, converts the result back to the delta-step form, and
506 // stores that as the current set for future use. Note that the
507 // inferences here mean that writers using prediction will need
508 // to fill in the delta-step values correctly as well - since the
509 // whole RPS prediction process is somewhat overly sophisticated,
510 // this hopefully forms a useful check for them to ensure their
511 // predicted form actually matches what was intended rather than
512 // an onerous additional requirement.
513
514 d_poc = 0;
515 for (i = 0; i < ref->num_negative_pics; i++) {
516 d_poc -= ref->delta_poc_s0_minus1[i] + 1;
517 ref_delta_poc_s0[i] = d_poc;
518 }
519 d_poc = 0;
520 for (i = 0; i < ref->num_positive_pics; i++) {
521 d_poc += ref->delta_poc_s1_minus1[i] + 1;
522 ref_delta_poc_s1[i] = d_poc;
523 }
524
525 i = 0;
526 for (j = ref->num_positive_pics - 1; j >= 0; j--) {
527 d_poc = ref_delta_poc_s1[j] + delta_rps;
528 if (d_poc < 0 && current->use_delta_flag[ref->num_negative_pics + j]) {
529 delta_poc_s0[i] = d_poc;
530 used_by_curr_pic_s0[i++] =
531 current->used_by_curr_pic_flag[ref->num_negative_pics + j];
532 }
533 }
534 if (delta_rps < 0 && current->use_delta_flag[num_delta_pocs]) {
535 delta_poc_s0[i] = delta_rps;
536 used_by_curr_pic_s0[i++] =
537 current->used_by_curr_pic_flag[num_delta_pocs];
538 }
539 for (j = 0; j < ref->num_negative_pics; j++) {
540 d_poc = ref_delta_poc_s0[j] + delta_rps;
541 if (d_poc < 0 && current->use_delta_flag[j]) {
542 delta_poc_s0[i] = d_poc;
543 used_by_curr_pic_s0[i++] = current->used_by_curr_pic_flag[j];
544 }
545 }
546
547 infer(num_negative_pics, i);
548 for (i = 0; i < current->num_negative_pics; i++) {
549 infer(delta_poc_s0_minus1[i],
550 -(delta_poc_s0[i] - (i == 0 ? 0 : delta_poc_s0[i - 1])) - 1);
551 infer(used_by_curr_pic_s0_flag[i], used_by_curr_pic_s0[i]);
552 }
553
554 i = 0;
555 for (j = ref->num_negative_pics - 1; j >= 0; j--) {
556 d_poc = ref_delta_poc_s0[j] + delta_rps;
557 if (d_poc > 0 && current->use_delta_flag[j]) {
558 delta_poc_s1[i] = d_poc;
559 used_by_curr_pic_s1[i++] = current->used_by_curr_pic_flag[j];
560 }
561 }
562 if (delta_rps > 0 && current->use_delta_flag[num_delta_pocs]) {
563 delta_poc_s1[i] = delta_rps;
564 used_by_curr_pic_s1[i++] =
565 current->used_by_curr_pic_flag[num_delta_pocs];
566 }
567 for (j = 0; j < ref->num_positive_pics; j++) {
568 d_poc = ref_delta_poc_s1[j] + delta_rps;
569 if (d_poc > 0 && current->use_delta_flag[ref->num_negative_pics + j]) {
570 delta_poc_s1[i] = d_poc;
571 used_by_curr_pic_s1[i++] =
572 current->used_by_curr_pic_flag[ref->num_negative_pics + j];
573 }
574 }
575
576 infer(num_positive_pics, i);
577 for (i = 0; i < current->num_positive_pics; i++) {
578 infer(delta_poc_s1_minus1[i],
579 delta_poc_s1[i] - (i == 0 ? 0 : delta_poc_s1[i - 1]) - 1);
580 infer(used_by_curr_pic_s1_flag[i], used_by_curr_pic_s1[i]);
581 }
582
583 } else {
584 ue(num_negative_pics, 0, 15);
585 ue(num_positive_pics, 0, 15 - current->num_negative_pics);
586
587 for (i = 0; i < current->num_negative_pics; i++) {
588 ue(delta_poc_s0_minus1[i], 0, INT16_MAX);
589 flag(used_by_curr_pic_s0_flag[i]);
590 }
591
592 for (i = 0; i < current->num_positive_pics; i++) {
593 ue(delta_poc_s1_minus1[i], 0, INT16_MAX);
594 flag(used_by_curr_pic_s1_flag[i]);
595 }
596 }
597
598 return 0;
599 }
600
601 static int FUNC(scaling_list_data)(CodedBitstreamContext *ctx, RWContext *rw,
602 H265RawScalingList *current)
603 {
604 int sizeId, matrixId;
605 int err, n, i;
606
607 for (sizeId = 0; sizeId < 4; sizeId++) {
608 for (matrixId = 0; matrixId < 6; matrixId += (sizeId == 3 ? 3 : 1)) {
609 flag(scaling_list_pred_mode_flag[sizeId][matrixId]);
610 if (!current->scaling_list_pred_mode_flag[sizeId][matrixId]) {
611 ue(scaling_list_pred_matrix_id_delta[sizeId][matrixId],
612 0, sizeId == 3 ? matrixId / 3 : matrixId);
613 } else {
614 n = FFMIN(64, 1 << (4 + (sizeId << 1)));
615 if (sizeId > 1)
616 se(scaling_list_dc_coef_minus8[sizeId - 2][matrixId], -7, +247);
617 for (i = 0; i < n; i++) {
618 xse(scaling_list_delta_coeff,
619 current->scaling_list_delta_coeff[sizeId][matrixId][i],
620 -128, +127);
621 }
622 }
623 }
624 }
625
626 return 0;
627 }
628
629 static int FUNC(sps_range_extension)(CodedBitstreamContext *ctx, RWContext *rw,
630 H265RawSPS *current)
631 {
632 int err;
633
634 flag(transform_skip_rotation_enabled_flag);
635 flag(transform_skip_context_enabled_flag);
636 flag(implicit_rdpcm_enabled_flag);
637 flag(explicit_rdpcm_enabled_flag);
638 flag(extended_precision_processing_flag);
639 flag(intra_smoothing_disabled_flag);
640 flag(high_precision_offsets_enabled_flag);
641 flag(persistent_rice_adaptation_enabled_flag);
642 flag(cabac_bypass_alignment_enabled_flag);
643
644 return 0;
645 }
646
647 static int FUNC(sps_scc_extension)(CodedBitstreamContext *ctx, RWContext *rw,
648 H265RawSPS *current)
649 {
650 int err, comp, i;
651
652 flag(sps_curr_pic_ref_enabled_flag);
653
654 flag(palette_mode_enabled_flag);
655 if (current->palette_mode_enabled_flag) {
656 ue(palette_max_size, 0, 64);
657 ue(delta_palette_max_predictor_size, 0, 128);
658
659 flag(sps_palette_predictor_initializer_present_flag);
660 if (current->sps_palette_predictor_initializer_present_flag) {
661 ue(sps_num_palette_predictor_initializer_minus1, 0, 128);
662 for (comp = 0; comp < (current->chroma_format_idc ? 3 : 1); comp++) {
663 int bit_depth = comp == 0 ? current->bit_depth_luma_minus8 + 8
664 : current->bit_depth_chroma_minus8 + 8;
665 for (i = 0; i <= current->sps_num_palette_predictor_initializer_minus1; i++)
666 u(bit_depth, sps_palette_predictor_initializers[comp][i],
667 0, (1 << bit_depth) - 1);
668 }
669 }
670 }
671
672 u(2, motion_vector_resolution_control_idc, 0, 2);
673 flag(intra_boundary_filtering_disable_flag);
674
675 return 0;
676 }
677
678 static int FUNC(sps)(CodedBitstreamContext *ctx, RWContext *rw,
679 H265RawSPS *current)
680 {
681 CodedBitstreamH265Context *h265 = ctx->priv_data;
682 const H265RawVPS *vps;
683 int err, i;
684 unsigned int min_cb_log2_size_y, ctb_log2_size_y,
685 min_cb_size_y, min_tb_log2_size_y;
686
687 HEADER("Sequence Parameter Set");
688
689 CHECK(FUNC(nal_unit_header)(ctx, rw, &current->nal_unit_header, HEVC_NAL_SPS));
690
691 u(4, sps_video_parameter_set_id, 0, 15);
692 h265->active_vps = vps = h265->vps[current->sps_video_parameter_set_id];
693
694 u(3, sps_max_sub_layers_minus1, 0, HEVC_MAX_SUB_LAYERS - 1);
695 flag(sps_temporal_id_nesting_flag);
696 if (vps) {
697 if (vps->vps_max_sub_layers_minus1 > current->sps_max_sub_layers_minus1) {
698 av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid stream: "
699 "sps_max_sub_layers_minus1 (%d) must be less than or equal to "
700 "vps_max_sub_layers_minus1 (%d).\n",
701 vps->vps_max_sub_layers_minus1,
702 current->sps_max_sub_layers_minus1);
703 return AVERROR_INVALIDDATA;
704 }
705 if (vps->vps_temporal_id_nesting_flag &&
706 !current->sps_temporal_id_nesting_flag) {
707 av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid stream: "
708 "sps_temporal_id_nesting_flag must be 1 if "
709 "vps_temporal_id_nesting_flag is 1.\n");
710 return AVERROR_INVALIDDATA;
711 }
712 }
713
714 CHECK(FUNC(profile_tier_level)(ctx, rw, &current->profile_tier_level,
715 1, current->sps_max_sub_layers_minus1));
716
717 ue(sps_seq_parameter_set_id, 0, 15);
718
719 ue(chroma_format_idc, 0, 3);
720 if (current->chroma_format_idc == 3)
721 flag(separate_colour_plane_flag);
722 else
723 infer(separate_colour_plane_flag, 0);
724
725 ue(pic_width_in_luma_samples, 1, HEVC_MAX_WIDTH);
726 ue(pic_height_in_luma_samples, 1, HEVC_MAX_HEIGHT);
727
728 flag(conformance_window_flag);
729 if (current->conformance_window_flag) {
730 ue(conf_win_left_offset, 0, current->pic_width_in_luma_samples);
731 ue(conf_win_right_offset, 0, current->pic_width_in_luma_samples);
732 ue(conf_win_top_offset, 0, current->pic_height_in_luma_samples);
733 ue(conf_win_bottom_offset, 0, current->pic_height_in_luma_samples);
734 } else {
735 infer(conf_win_left_offset, 0);
736 infer(conf_win_right_offset, 0);
737 infer(conf_win_top_offset, 0);
738 infer(conf_win_bottom_offset, 0);
739 }
740
741 ue(bit_depth_luma_minus8, 0, 8);
742 ue(bit_depth_chroma_minus8, 0, 8);
743
744 ue(log2_max_pic_order_cnt_lsb_minus4, 0, 12);
745
746 flag(sps_sub_layer_ordering_info_present_flag);
747 for (i = (current->sps_sub_layer_ordering_info_present_flag ?
748 0 : current->sps_max_sub_layers_minus1);
749 i <= current->sps_max_sub_layers_minus1; i++) {
750 ue(sps_max_dec_pic_buffering_minus1[i], 0, HEVC_MAX_DPB_SIZE - 1);
751 ue(sps_max_num_reorder_pics[i], 0, current->sps_max_dec_pic_buffering_minus1[i]);
752 ue(sps_max_latency_increase_plus1[i], 0, UINT32_MAX - 1);
753 }
754 if (!current->sps_sub_layer_ordering_info_present_flag) {
755 for (i = 0; i < current->sps_max_sub_layers_minus1; i++) {
756 infer(sps_max_dec_pic_buffering_minus1[i],
757 current->sps_max_dec_pic_buffering_minus1[current->sps_max_sub_layers_minus1]);
758 infer(sps_max_num_reorder_pics[i],
759 current->sps_max_num_reorder_pics[current->sps_max_sub_layers_minus1]);
760 infer(sps_max_latency_increase_plus1[i],
761 current->sps_max_latency_increase_plus1[current->sps_max_sub_layers_minus1]);
762 }
763 }
764
765 ue(log2_min_luma_coding_block_size_minus3, 0, 3);
766 min_cb_log2_size_y = current->log2_min_luma_coding_block_size_minus3 + 3;
767
768 ue(log2_diff_max_min_luma_coding_block_size, 0, 3);
769 ctb_log2_size_y = min_cb_log2_size_y +
770 current->log2_diff_max_min_luma_coding_block_size;
771
772 min_cb_size_y = 1 << min_cb_log2_size_y;
773 if (current->pic_width_in_luma_samples % min_cb_size_y ||
774 current->pic_height_in_luma_samples % min_cb_size_y) {
775 av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid dimensions: %ux%u not divisible "
776 "by MinCbSizeY = %u.\n", current->pic_width_in_luma_samples,
777 current->pic_height_in_luma_samples, min_cb_size_y);
778 return AVERROR_INVALIDDATA;
779 }
780
781 ue(log2_min_luma_transform_block_size_minus2, 0, min_cb_log2_size_y - 3);
782 min_tb_log2_size_y = current->log2_min_luma_transform_block_size_minus2 + 2;
783
784 ue(log2_diff_max_min_luma_transform_block_size,
785 0, FFMIN(ctb_log2_size_y, 5) - min_tb_log2_size_y);
786
787 ue(max_transform_hierarchy_depth_inter,
788 0, ctb_log2_size_y - min_tb_log2_size_y);
789 ue(max_transform_hierarchy_depth_intra,
790 0, ctb_log2_size_y - min_tb_log2_size_y);
791
792 flag(scaling_list_enabled_flag);
793 if (current->scaling_list_enabled_flag) {
794 flag(sps_scaling_list_data_present_flag);
795 if (current->sps_scaling_list_data_present_flag)
796 CHECK(FUNC(scaling_list_data)(ctx, rw, &current->scaling_list));
797 } else {
798 infer(sps_scaling_list_data_present_flag, 0);
799 }
800
801 flag(amp_enabled_flag);
802 flag(sample_adaptive_offset_enabled_flag);
803
804 flag(pcm_enabled_flag);
805 if (current->pcm_enabled_flag) {
806 u(4, pcm_sample_bit_depth_luma_minus1,
807 0, current->bit_depth_luma_minus8 + 8 - 1);
808 u(4, pcm_sample_bit_depth_chroma_minus1,
809 0, current->bit_depth_chroma_minus8 + 8 - 1);
810
811 ue(log2_min_pcm_luma_coding_block_size_minus3,
812 FFMIN(min_cb_log2_size_y, 5) - 3, FFMIN(ctb_log2_size_y, 5) - 3);
813 ue(log2_diff_max_min_pcm_luma_coding_block_size,
814 0, FFMIN(ctb_log2_size_y, 5) - (current->log2_min_pcm_luma_coding_block_size_minus3 + 3));
815
816 flag(pcm_loop_filter_disabled_flag);
817 }
818
819 ue(num_short_term_ref_pic_sets, 0, HEVC_MAX_SHORT_TERM_REF_PIC_SETS);
820 for (i = 0; i < current->num_short_term_ref_pic_sets; i++)
821 CHECK(FUNC(st_ref_pic_set)(ctx, rw, &current->st_ref_pic_set[i], i, current));
822
823 flag(long_term_ref_pics_present_flag);
824 if (current->long_term_ref_pics_present_flag) {
825 ue(num_long_term_ref_pics_sps, 0, HEVC_MAX_LONG_TERM_REF_PICS);
826 for (i = 0; i < current->num_long_term_ref_pics_sps; i++) {
827 u(current->log2_max_pic_order_cnt_lsb_minus4 + 4,
828 lt_ref_pic_poc_lsb_sps[i],
829 0, (1 << (current->log2_max_pic_order_cnt_lsb_minus4 + 4)) - 1);
830 flag(used_by_curr_pic_lt_sps_flag[i]);
831 }
832 }
833
834 flag(sps_temporal_mvp_enabled_flag);
835 flag(strong_intra_smoothing_enabled_flag);
836
837 flag(vui_parameters_present_flag);
838 if (current->vui_parameters_present_flag)
839 CHECK(FUNC(vui_parameters)(ctx, rw, &current->vui, current));
840
841 flag(sps_extension_present_flag);
842 if (current->sps_extension_present_flag) {
843 flag(sps_range_extension_flag);
844 flag(sps_multilayer_extension_flag);
845 flag(sps_3d_extension_flag);
846 flag(sps_scc_extension_flag);
847 u(4, sps_extension_4bits, 0, (1 << 4) - 1);
848 }
849
850 if (current->sps_range_extension_flag)
851 CHECK(FUNC(sps_range_extension)(ctx, rw, current));
852 if (current->sps_multilayer_extension_flag)
853 return AVERROR_PATCHWELCOME;
854 if (current->sps_3d_extension_flag)
855 return AVERROR_PATCHWELCOME;
856 if (current->sps_scc_extension_flag)
857 CHECK(FUNC(sps_scc_extension)(ctx, rw, current));
858 if (current->sps_extension_4bits)
859 CHECK(FUNC(extension_data)(ctx, rw, &current->extension_data));
860
861 CHECK(FUNC(rbsp_trailing_bits)(ctx, rw));
862
863 return 0;
864 }
865
866 static int FUNC(pps_range_extension)(CodedBitstreamContext *ctx, RWContext *rw,
867 H265RawPPS *current)
868 {
869 CodedBitstreamH265Context *h265 = ctx->priv_data;
870 const H265RawSPS *sps = h265->active_sps;
871 int err, i;
872
873 if (current->transform_skip_enabled_flag)
874 ue(log2_max_transform_skip_block_size_minus2, 0, 4);
875 flag(cross_component_prediction_enabled_flag);
876
877 flag(chroma_qp_offset_list_enabled_flag);
878 if (current->chroma_qp_offset_list_enabled_flag) {
879 ue(diff_cu_chroma_qp_offset_depth,
880 0, sps->log2_diff_max_min_luma_coding_block_size);
881 ue(chroma_qp_offset_list_len_minus1, 0, 5);
882 for (i = 0; i <= current->chroma_qp_offset_list_len_minus1; i++) {
883 se(cb_qp_offset_list[i], -12, +12);
884 se(cr_qp_offset_list[i], -12, +12);
885 }
886 }
887
888 ue(log2_sao_offset_scale_luma, 0, FFMAX(0, sps->bit_depth_luma_minus8 - 2));
889 ue(log2_sao_offset_scale_chroma, 0, FFMAX(0, sps->bit_depth_chroma_minus8 - 2));
890
891 return 0;
892 }
893
/*
 * PPS screen-content-coding extension fields (pps_scc_extension(),
 * H.265 section 7.3.2.3.3): current-picture referencing, adaptive colour
 * transform QP offsets, and the palette predictor initializers.
 */
static int FUNC(pps_scc_extension)(CodedBitstreamContext *ctx, RWContext *rw,
                                   H265RawPPS *current)
{
    int err, comp, i;

    flag(pps_curr_pic_ref_enabled_flag);

    flag(residual_adaptive_colour_transform_enabled_flag);
    if (current->residual_adaptive_colour_transform_enabled_flag) {
        flag(pps_slice_act_qp_offsets_present_flag);
        se(pps_act_y_qp_offset_plus5, -7, +17);
        se(pps_act_cb_qp_offset_plus5, -7, +17);
        se(pps_act_cr_qp_offset_plus3, -9, +15);
    } else {
        // ACT disabled: all offsets take their inferred defaults.
        infer(pps_slice_act_qp_offsets_present_flag, 0);
        infer(pps_act_y_qp_offset_plus5, 0);
        infer(pps_act_cb_qp_offset_plus5, 0);
        infer(pps_act_cr_qp_offset_plus3, 0);
    }

    flag(pps_palette_predictor_initializer_present_flag);
    if (current->pps_palette_predictor_initializer_present_flag) {
        ue(pps_num_palette_predictor_initializer, 0, 128);
        if (current->pps_num_palette_predictor_initializer > 0) {
            flag(monochrome_palette_flag);
            ue(luma_bit_depth_entry_minus8, 0, 8);
            // Chroma entry depth is only coded for non-monochrome palettes;
            // it is also only read below in that case.
            if (!current->monochrome_palette_flag)
                ue(chroma_bit_depth_entry_minus8, 0, 8);
            // One component for monochrome, three (Y/Cb/Cr) otherwise;
            // each entry is a fixed-width value of the component's bit depth.
            for (comp = 0; comp < (current->monochrome_palette_flag ? 1 : 3); comp++) {
                int bit_depth = comp == 0 ? current->luma_bit_depth_entry_minus8 + 8
                                          : current->chroma_bit_depth_entry_minus8 + 8;
                for (i = 0; i < current->pps_num_palette_predictor_initializer; i++)
                    u(bit_depth, pps_palette_predictor_initializers[comp][i],
                      0, (1 << bit_depth) - 1);
            }
        }
    }

    return 0;
}
934
935 static int FUNC(pps)(CodedBitstreamContext *ctx, RWContext *rw,
936 H265RawPPS *current)
937 {
938 CodedBitstreamH265Context *h265 = ctx->priv_data;
939 const H265RawSPS *sps;
940 int err, i;
941
942 HEADER("Picture Parameter Set");
943
944 CHECK(FUNC(nal_unit_header)(ctx, rw, &current->nal_unit_header, HEVC_NAL_PPS));
945
946 ue(pps_pic_parameter_set_id, 0, 63);
947 ue(pps_seq_parameter_set_id, 0, 15);
948 sps = h265->sps[current->pps_seq_parameter_set_id];
949 if (!sps) {
950 av_log(ctx->log_ctx, AV_LOG_ERROR, "SPS id %d not available.\n",
951 current->pps_seq_parameter_set_id);
952 return AVERROR_INVALIDDATA;
953 }
954 h265->active_sps = sps;
955
956 flag(dependent_slice_segments_enabled_flag);
957 flag(output_flag_present_flag);
958 u(3, num_extra_slice_header_bits, 0, 7);
959 flag(sign_data_hiding_enabled_flag);
960 flag(cabac_init_present_flag);
961
962 ue(num_ref_idx_l0_default_active_minus1, 0, 14);
963 ue(num_ref_idx_l1_default_active_minus1, 0, 14);
964
965 se(init_qp_minus26, -(26 + 6 * sps->bit_depth_luma_minus8), +25);
966
967 flag(constrained_intra_pred_flag);
968 flag(transform_skip_enabled_flag);
969 flag(cu_qp_delta_enabled_flag);
970 if (current->cu_qp_delta_enabled_flag)
971 ue(diff_cu_qp_delta_depth,
972 0, sps->log2_diff_max_min_luma_coding_block_size);
973 else
974 infer(diff_cu_qp_delta_depth, 0);
975
976 se(pps_cb_qp_offset, -12, +12);
977 se(pps_cr_qp_offset, -12, +12);
978 flag(pps_slice_chroma_qp_offsets_present_flag);
979
980 flag(weighted_pred_flag);
981 flag(weighted_bipred_flag);
982
983 flag(transquant_bypass_enabled_flag);
984 flag(tiles_enabled_flag);
985 flag(entropy_coding_sync_enabled_flag);
986
987 if (current->tiles_enabled_flag) {
988 ue(num_tile_columns_minus1, 0, HEVC_MAX_TILE_COLUMNS);
989 ue(num_tile_rows_minus1, 0, HEVC_MAX_TILE_ROWS);
990 flag(uniform_spacing_flag);
991 if (!current->uniform_spacing_flag) {
992 for (i = 0; i < current->num_tile_columns_minus1; i++)
993 ue(column_width_minus1[i], 0, sps->pic_width_in_luma_samples);
994 for (i = 0; i < current->num_tile_rows_minus1; i++)
995 ue(row_height_minus1[i], 0, sps->pic_height_in_luma_samples);
996 }
997 flag(loop_filter_across_tiles_enabled_flag);
998 } else {
999 infer(num_tile_columns_minus1, 0);
1000 infer(num_tile_rows_minus1, 0);
1001 }
1002
1003 flag(pps_loop_filter_across_slices_enabled_flag);
1004 flag(deblocking_filter_control_present_flag);
1005 if (current->deblocking_filter_control_present_flag) {
1006 flag(deblocking_filter_override_enabled_flag);
1007 flag(pps_deblocking_filter_disabled_flag);
1008 if (!current->pps_deblocking_filter_disabled_flag) {
1009 se(pps_beta_offset_div2, -6, +6);
1010 se(pps_tc_offset_div2, -6, +6);
1011 } else {
1012 infer(pps_beta_offset_div2, 0);
1013 infer(pps_tc_offset_div2, 0);
1014 }
1015 } else {
1016 infer(deblocking_filter_override_enabled_flag, 0);
1017 infer(pps_deblocking_filter_disabled_flag, 0);
1018 infer(pps_beta_offset_div2, 0);
1019 infer(pps_tc_offset_div2, 0);
1020 }
1021
1022 flag(pps_scaling_list_data_present_flag);
1023 if (current->pps_scaling_list_data_present_flag)
1024 CHECK(FUNC(scaling_list_data)(ctx, rw, &current->scaling_list));
1025
1026 flag(lists_modification_present_flag);
1027
1028 ue(log2_parallel_merge_level_minus2,
1029 0, (sps->log2_min_luma_coding_block_size_minus3 + 3 +
1030 sps->log2_diff_max_min_luma_coding_block_size - 2));
1031
1032 flag(slice_segment_header_extension_present_flag);
1033
1034 flag(pps_extension_present_flag);
1035 if (current->pps_extension_present_flag) {
1036 flag(pps_range_extension_flag);
1037 flag(pps_multilayer_extension_flag);
1038 flag(pps_3d_extension_flag);
1039 flag(pps_scc_extension_flag);
1040 u(4, pps_extension_4bits, 0, (1 << 4) - 1);
1041 }
1042 if (current->pps_range_extension_flag)
1043 CHECK(FUNC(pps_range_extension)(ctx, rw, current));
1044 if (current->pps_multilayer_extension_flag)
1045 return AVERROR_PATCHWELCOME;
1046 if (current->pps_3d_extension_flag)
1047 return AVERROR_PATCHWELCOME;
1048 if (current->pps_scc_extension_flag)
1049 CHECK(FUNC(pps_scc_extension)(ctx, rw, current));
1050 if (current->pps_extension_4bits)
1051 CHECK(FUNC(extension_data)(ctx, rw, &current->extension_data));
1052
1053 CHECK(FUNC(rbsp_trailing_bits)(ctx, rw));
1054
1055 return 0;
1056 }
1057
/*
 * Access Unit Delimiter (access_unit_delimiter_rbsp(), H.265 section
 * 7.3.2.5).  Carries only pic_type, which constrains the slice types that
 * may appear in the access unit (Table 7-2: 0 = I, 1 = I/P, 2 = I/P/B).
 */
static int FUNC(aud)(CodedBitstreamContext *ctx, RWContext *rw,
                     H265RawAUD *current)
{
    int err;

    HEADER("Access Unit Delimiter");

    CHECK(FUNC(nal_unit_header)(ctx, rw, &current->nal_unit_header, HEVC_NAL_AUD));

    u(3, pic_type, 0, 2);

    CHECK(FUNC(rbsp_trailing_bits)(ctx, rw));

    return 0;
}
1073
/*
 * Reference picture list modification (ref_pic_lists_modification(),
 * H.265 section 7.3.6.2).  Each list entry is a fixed-width index of
 * Ceil(Log2(num_pic_total_curr)) bits.  The caller (slice_segment_header)
 * only invokes this when num_pic_total_curr > 1, so entry_size is always
 * at least 1 and av_log2() never sees a zero argument.
 */
static int FUNC(ref_pic_lists_modification)(CodedBitstreamContext *ctx, RWContext *rw,
                                            H265RawSliceHeader *current,
                                            unsigned int num_pic_total_curr)
{
    unsigned int entry_size;
    int err, i;

    entry_size = av_log2(num_pic_total_curr - 1) + 1;

    flag(ref_pic_list_modification_flag_l0);
    if (current->ref_pic_list_modification_flag_l0) {
        for (i = 0; i <= current->num_ref_idx_l0_active_minus1; i++)
            u(entry_size, list_entry_l0[i], 0, num_pic_total_curr - 1);
    }

    // List 1 only exists for B slices.
    if (current->slice_type == HEVC_SLICE_B) {
        flag(ref_pic_list_modification_flag_l1);
        if (current->ref_pic_list_modification_flag_l1) {
            for (i = 0; i <= current->num_ref_idx_l1_active_minus1; i++)
                u(entry_size, list_entry_l1[i], 0, num_pic_total_curr - 1);
        }
    }

    return 0;
}
1099
1100 static int FUNC(pred_weight_table)(CodedBitstreamContext *ctx, RWContext *rw,
1101 H265RawSliceHeader *current)
1102 {
1103 CodedBitstreamH265Context *h265 = ctx->priv_data;
1104 const H265RawSPS *sps = h265->active_sps;
1105 int err, i, j;
1106 int chroma = !sps->separate_colour_plane_flag &&
1107 sps->chroma_format_idc != 0;
1108
1109 ue(luma_log2_weight_denom, 0, 7);
1110 if (chroma)
1111 se(delta_chroma_log2_weight_denom, -7, 7);
1112 else
1113 infer(delta_chroma_log2_weight_denom, 0);
1114
1115 for (i = 0; i <= current->num_ref_idx_l0_active_minus1; i++) {
1116 if (1 /* is not same POC and same layer_id */)
1117 flag(luma_weight_l0_flag[i]);
1118 else
1119 infer(luma_weight_l0_flag[i], 0);
1120 }
1121 if (chroma) {
1122 for (i = 0; i <= current->num_ref_idx_l0_active_minus1; i++) {
1123 if (1 /* is not same POC and same layer_id */)
1124 flag(chroma_weight_l0_flag[i]);
1125 else
1126 infer(chroma_weight_l0_flag[i], 0);
1127 }
1128 }
1129
1130 for (i = 0; i <= current->num_ref_idx_l0_active_minus1; i++) {
1131 if (current->luma_weight_l0_flag[i]) {
1132 se(delta_luma_weight_l0[i], -128, +127);
1133 se(luma_offset_l0[i],
1134 -(1 << (sps->bit_depth_luma_minus8 + 8 - 1)),
1135 +(1 << (sps->bit_depth_luma_minus8 + 8 - 1) - 1));
1136 } else {
1137 infer(delta_luma_weight_l0[i], 0);
1138 infer(luma_offset_l0[i], 0);
1139 }
1140 if (current->chroma_weight_l0_flag[i]) {
1141 for (j = 0; j < 2; j++) {
1142 se(delta_chroma_weight_l0[i][j], -128, +127);
1143 se(chroma_offset_l0[i][j],
1144 -(4 << (sps->bit_depth_chroma_minus8 + 8 - 1)),
1145 +(4 << (sps->bit_depth_chroma_minus8 + 8 - 1) - 1));
1146 }
1147 } else {
1148 for (j = 0; j < 2; j++) {
1149 infer(delta_chroma_weight_l0[i][j], 0);
1150 infer(chroma_offset_l0[i][j], 0);
1151 }
1152 }
1153 }
1154
1155 if (current->slice_type == HEVC_SLICE_B) {
1156 for (i = 0; i <= current->num_ref_idx_l1_active_minus1; i++) {
1157 if (1 /* RefPicList1[i] is not CurrPic, nor is it in a different layer */)
1158 flag(luma_weight_l1_flag[i]);
1159 else
1160 infer(luma_weight_l1_flag[i], 0);
1161 }
1162 if (chroma) {
1163 for (i = 0; i <= current->num_ref_idx_l1_active_minus1; i++) {
1164 if (1 /* RefPicList1[i] is not CurrPic, nor is it in a different layer */)
1165 flag(chroma_weight_l1_flag[i]);
1166 else
1167 infer(chroma_weight_l1_flag[i], 0);
1168 }
1169 }
1170
1171 for (i = 0; i <= current->num_ref_idx_l1_active_minus1; i++) {
1172 if (current->luma_weight_l1_flag[i]) {
1173 se(delta_luma_weight_l1[i], -128, +127);
1174 se(luma_offset_l1[i],
1175 - 1 << (sps->bit_depth_luma_minus8 + 8 - 1),
1176 + 1 << (sps->bit_depth_luma_minus8 + 8 - 1) - 1);
1177 } else {
1178 infer(delta_luma_weight_l1[i], 0);
1179 infer(luma_offset_l1[i], 0);
1180 }
1181 if (current->chroma_weight_l1_flag[i]) {
1182 for (j = 0; j < 2; j++) {
1183 se(delta_chroma_weight_l1[i][j], -128, +127);
1184 se(chroma_offset_l1[i][j],
1185 - 4 << (sps->bit_depth_chroma_minus8 + 8 - 1),
1186 + 4 << (sps->bit_depth_chroma_minus8 + 8 - 1) - 1);
1187 }
1188 } else {
1189 for (j = 0; j < 2; j++) {
1190 infer(delta_chroma_weight_l1[i][j], 0);
1191 infer(chroma_offset_l1[i][j], 0);
1192 }
1193 }
1194 }
1195 }
1196
1197 return 0;
1198 }
1199
/*
 * Slice segment header (slice_segment_header(), H.265 section 7.3.6.1).
 *
 * Resolves the referenced PPS and, through it, the SPS (failing with
 * AVERROR_INVALIDDATA if either has not been seen) and records them as
 * h265->active_pps / h265->active_sps for the sub-parsers invoked below
 * (st_ref_pic_set, ref_pic_lists_modification, pred_weight_table).
 *
 * num_pic_total_curr is accumulated while parsing the reference picture
 * set syntax and drives whether ref_pic_lists_modification is present.
 */
static int FUNC(slice_segment_header)(CodedBitstreamContext *ctx, RWContext *rw,
                                      H265RawSliceHeader *current)
{
    CodedBitstreamH265Context *h265 = ctx->priv_data;
    const H265RawSPS *sps;
    const H265RawPPS *pps;
    unsigned int min_cb_log2_size_y, ctb_log2_size_y, ctb_size_y;
    unsigned int pic_width_in_ctbs_y, pic_height_in_ctbs_y, pic_size_in_ctbs_y;
    unsigned int num_pic_total_curr = 0;
    int err, i;

    HEADER("Slice Segment Header");

    // Any slice NAL unit type is acceptable (-1 = no expected type).
    CHECK(FUNC(nal_unit_header)(ctx, rw, &current->nal_unit_header, -1));

    flag(first_slice_segment_in_pic_flag);

    // no_output_of_prior_pics_flag is only coded for IRAP pictures
    // (BLA_W_LP .. IRAP_VCL23 covers the IRAP NAL unit type range).
    if (current->nal_unit_header.nal_unit_type >= HEVC_NAL_BLA_W_LP &&
        current->nal_unit_header.nal_unit_type <= HEVC_NAL_IRAP_VCL23)
        flag(no_output_of_prior_pics_flag);

    ue(slice_pic_parameter_set_id, 0, 63);

    pps = h265->pps[current->slice_pic_parameter_set_id];
    if (!pps) {
        av_log(ctx->log_ctx, AV_LOG_ERROR, "PPS id %d not available.\n",
               current->slice_pic_parameter_set_id);
        return AVERROR_INVALIDDATA;
    }
    h265->active_pps = pps;

    sps = h265->sps[pps->pps_seq_parameter_set_id];
    if (!sps) {
        av_log(ctx->log_ctx, AV_LOG_ERROR, "SPS id %d not available.\n",
               pps->pps_seq_parameter_set_id);
        return AVERROR_INVALIDDATA;
    }
    h265->active_sps = sps;

    // Derive the picture size in CTBs (7.4.3.2.1 variable derivations);
    // needed for the slice segment address width and entry point limits.
    min_cb_log2_size_y = sps->log2_min_luma_coding_block_size_minus3 + 3;
    ctb_log2_size_y = min_cb_log2_size_y + sps->log2_diff_max_min_luma_coding_block_size;
    ctb_size_y = 1 << ctb_log2_size_y;
    pic_width_in_ctbs_y =
        (sps->pic_width_in_luma_samples + ctb_size_y - 1) / ctb_size_y;
    pic_height_in_ctbs_y =
        (sps->pic_height_in_luma_samples + ctb_size_y - 1) / ctb_size_y;
    pic_size_in_ctbs_y = pic_width_in_ctbs_y * pic_height_in_ctbs_y;

    if (!current->first_slice_segment_in_pic_flag) {
        // Address is coded with Ceil(Log2(PicSizeInCtbsY)) bits.
        unsigned int address_size = av_log2(pic_size_in_ctbs_y - 1) + 1;
        if (pps->dependent_slice_segments_enabled_flag)
            flag(dependent_slice_segment_flag);
        else
            infer(dependent_slice_segment_flag, 0);
        u(address_size, slice_segment_address, 0, pic_size_in_ctbs_y - 1);
    } else {
        infer(dependent_slice_segment_flag, 0);
    }

    // Independent slice segments carry the full header; dependent ones
    // inherit everything in this block from the preceding independent one.
    if (!current->dependent_slice_segment_flag) {
        for (i = 0; i < pps->num_extra_slice_header_bits; i++)
            flag(slice_reserved_flag[i]);

        ue(slice_type, 0, 2);

        if (pps->output_flag_present_flag)
            flag(pic_output_flag);

        if (sps->separate_colour_plane_flag)
            u(2, colour_plane_id, 0, 2);

        // POC and reference picture set syntax is absent for IDR pictures.
        if (current->nal_unit_header.nal_unit_type != HEVC_NAL_IDR_W_RADL &&
            current->nal_unit_header.nal_unit_type != HEVC_NAL_IDR_N_LP) {
            const H265RawSTRefPicSet *rps;

            u(sps->log2_max_pic_order_cnt_lsb_minus4 + 4, slice_pic_order_cnt_lsb,
              0, (1 << (sps->log2_max_pic_order_cnt_lsb_minus4 + 4)) - 1);

            flag(short_term_ref_pic_set_sps_flag);
            if (!current->short_term_ref_pic_set_sps_flag) {
                // RPS coded explicitly in the slice header.
                CHECK(FUNC(st_ref_pic_set)(ctx, rw, &current->short_term_ref_pic_set,
                                           sps->num_short_term_ref_pic_sets, sps));
                rps = &current->short_term_ref_pic_set;
            } else if (sps->num_short_term_ref_pic_sets > 1) {
                // RPS selected by index from the SPS-coded sets.
                unsigned int idx_size = av_log2(sps->num_short_term_ref_pic_sets - 1) + 1;
                u(idx_size, short_term_ref_pic_set_idx,
                  0, sps->num_short_term_ref_pic_sets - 1);
                rps = &sps->st_ref_pic_set[current->short_term_ref_pic_set_idx];
            } else {
                // Only one SPS set exists: index is inferred.
                infer(short_term_ref_pic_set_idx, 0);
                rps = &sps->st_ref_pic_set[0];
            }

            // Count short-term pictures used by the current picture.
            num_pic_total_curr = 0;
            for (i = 0; i < rps->num_negative_pics; i++)
                if (rps->used_by_curr_pic_s0_flag[i])
                    ++num_pic_total_curr;
            for (i = 0; i < rps->num_positive_pics; i++)
                if (rps->used_by_curr_pic_s1_flag[i])
                    ++num_pic_total_curr;

            if (sps->long_term_ref_pics_present_flag) {
                unsigned int idx_size;

                if (sps->num_long_term_ref_pics_sps > 0) {
                    ue(num_long_term_sps, 0, sps->num_long_term_ref_pics_sps);
                    idx_size = av_log2(sps->num_long_term_ref_pics_sps - 1) + 1;
                } else {
                    infer(num_long_term_sps, 0);
                    idx_size = 0;
                }
                ue(num_long_term_pics, 0, HEVC_MAX_LONG_TERM_REF_PICS);

                // First num_long_term_sps entries come from the SPS;
                // the remainder are coded directly in the slice header.
                for (i = 0; i < current->num_long_term_sps +
                                current->num_long_term_pics; i++) {
                    if (i < current->num_long_term_sps) {
                        if (sps->num_long_term_ref_pics_sps > 1)
                            u(idx_size, lt_idx_sps[i],
                              0, sps->num_long_term_ref_pics_sps - 1);
                        if (sps->used_by_curr_pic_lt_sps_flag[current->lt_idx_sps[i]])
                            ++num_pic_total_curr;
                    } else {
                        u(sps->log2_max_pic_order_cnt_lsb_minus4 + 4, poc_lsb_lt[i],
                          0, (1 << (sps->log2_max_pic_order_cnt_lsb_minus4 + 4)) - 1);
                        flag(used_by_curr_pic_lt_flag[i]);
                        if (current->used_by_curr_pic_lt_flag[i])
                            ++num_pic_total_curr;
                    }
                    flag(delta_poc_msb_present_flag[i]);
                    if (current->delta_poc_msb_present_flag[i])
                        ue(delta_poc_msb_cycle_lt[i], 0, UINT32_MAX - 1);
                    else
                        infer(delta_poc_msb_cycle_lt[i], 0);
                }
            }

            if (sps->sps_temporal_mvp_enabled_flag)
                flag(slice_temporal_mvp_enabled_flag);
            else
                infer(slice_temporal_mvp_enabled_flag, 0);

            // Current-picture referencing (SCC) adds the current picture
            // itself to the usable reference count.
            if (pps->pps_curr_pic_ref_enabled_flag)
                ++num_pic_total_curr;
        }

        if (sps->sample_adaptive_offset_enabled_flag) {
            flag(slice_sao_luma_flag);
            if (!sps->separate_colour_plane_flag && sps->chroma_format_idc != 0)
                flag(slice_sao_chroma_flag);
            else
                infer(slice_sao_chroma_flag, 0);
        } else {
            infer(slice_sao_luma_flag, 0);
            infer(slice_sao_chroma_flag, 0);
        }

        // Inter-prediction parameters (P and B slices only).
        if (current->slice_type == HEVC_SLICE_P ||
            current->slice_type == HEVC_SLICE_B) {
            flag(num_ref_idx_active_override_flag);
            if (current->num_ref_idx_active_override_flag) {
                ue(num_ref_idx_l0_active_minus1, 0, 14);
                if (current->slice_type == HEVC_SLICE_B)
                    ue(num_ref_idx_l1_active_minus1, 0, 14);
                else
                    infer(num_ref_idx_l1_active_minus1, pps->num_ref_idx_l1_default_active_minus1);
            } else {
                infer(num_ref_idx_l0_active_minus1, pps->num_ref_idx_l0_default_active_minus1);
                infer(num_ref_idx_l1_active_minus1, pps->num_ref_idx_l1_default_active_minus1);
            }

            // Only meaningful with more than one usable reference picture.
            if (pps->lists_modification_present_flag && num_pic_total_curr > 1)
                CHECK(FUNC(ref_pic_lists_modification)(ctx, rw, current,
                                                       num_pic_total_curr));

            if (current->slice_type == HEVC_SLICE_B)
                flag(mvd_l1_zero_flag);
            if (pps->cabac_init_present_flag)
                flag(cabac_init_flag);
            else
                infer(cabac_init_flag, 0);
            if (current->slice_temporal_mvp_enabled_flag) {
                if (current->slice_type == HEVC_SLICE_B)
                    flag(collocated_from_l0_flag);
                else
                    infer(collocated_from_l0_flag, 1);
                // collocated_ref_idx indexes the list selected above.
                if (current->collocated_from_l0_flag) {
                    if (current->num_ref_idx_l0_active_minus1 > 0)
                        ue(collocated_ref_idx, 0, current->num_ref_idx_l0_active_minus1);
                    else
                        infer(collocated_ref_idx, 0);
                } else {
                    if (current->num_ref_idx_l1_active_minus1 > 0)
                        ue(collocated_ref_idx, 0, current->num_ref_idx_l1_active_minus1);
                    else
                        infer(collocated_ref_idx, 0);
                }
            }

            if ((pps->weighted_pred_flag && current->slice_type == HEVC_SLICE_P) ||
                (pps->weighted_bipred_flag && current->slice_type == HEVC_SLICE_B))
                CHECK(FUNC(pred_weight_table)(ctx, rw, current));

            ue(five_minus_max_num_merge_cand, 0, 4);
            if (sps->motion_vector_resolution_control_idc == 2)
                flag(use_integer_mv_flag);
            else
                infer(use_integer_mv_flag, sps->motion_vector_resolution_control_idc);
        }

        // QP: SliceQpY = 26 + init_qp_minus26 + slice_qp_delta must stay
        // within [-QpBdOffsetY, 51].
        se(slice_qp_delta,
           - 6 * sps->bit_depth_luma_minus8 - (pps->init_qp_minus26 + 26),
           + 51 - (pps->init_qp_minus26 + 26));
        if (pps->pps_slice_chroma_qp_offsets_present_flag) {
            se(slice_cb_qp_offset, -12, +12);
            se(slice_cr_qp_offset, -12, +12);
        } else {
            infer(slice_cb_qp_offset, 0);
            infer(slice_cr_qp_offset, 0);
        }
        if (pps->pps_slice_act_qp_offsets_present_flag) {
            se(slice_act_y_qp_offset,
               -12 - (pps->pps_act_y_qp_offset_plus5 - 5),
               +12 - (pps->pps_act_y_qp_offset_plus5 - 5));
            se(slice_act_cb_qp_offset,
               -12 - (pps->pps_act_cb_qp_offset_plus5 - 5),
               +12 - (pps->pps_act_cb_qp_offset_plus5 - 5));
            se(slice_act_cr_qp_offset,
               -12 - (pps->pps_act_cr_qp_offset_plus3 - 3),
               +12 - (pps->pps_act_cr_qp_offset_plus3 - 3));
        } else {
            infer(slice_act_y_qp_offset, 0);
            infer(slice_act_cb_qp_offset, 0);
            infer(slice_act_cr_qp_offset, 0);
        }
        if (pps->chroma_qp_offset_list_enabled_flag)
            flag(cu_chroma_qp_offset_enabled_flag);
        else
            infer(cu_chroma_qp_offset_enabled_flag, 0);

        // Deblocking filter: slice values either override or inherit the
        // PPS-level settings.
        if (pps->deblocking_filter_override_enabled_flag)
            flag(deblocking_filter_override_flag);
        else
            infer(deblocking_filter_override_flag, 0);
        if (current->deblocking_filter_override_flag) {
            flag(slice_deblocking_filter_disabled_flag);
            if (!current->slice_deblocking_filter_disabled_flag) {
                se(slice_beta_offset_div2, -6, +6);
                se(slice_tc_offset_div2, -6, +6);
            } else {
                infer(slice_beta_offset_div2, pps->pps_beta_offset_div2);
                infer(slice_tc_offset_div2, pps->pps_tc_offset_div2);
            }
        } else {
            infer(slice_deblocking_filter_disabled_flag,
                  pps->pps_deblocking_filter_disabled_flag);
            infer(slice_beta_offset_div2, pps->pps_beta_offset_div2);
            infer(slice_tc_offset_div2, pps->pps_tc_offset_div2);
        }
        if (pps->pps_loop_filter_across_slices_enabled_flag &&
            (current->slice_sao_luma_flag || current->slice_sao_chroma_flag ||
             !current->slice_deblocking_filter_disabled_flag))
            flag(slice_loop_filter_across_slices_enabled_flag);
        else
            infer(slice_loop_filter_across_slices_enabled_flag,
                  pps->pps_loop_filter_across_slices_enabled_flag);
    }

    // Entry points: one per WPP row, per tile, or per tile-row segment,
    // depending on which features are enabled.
    if (pps->tiles_enabled_flag || pps->entropy_coding_sync_enabled_flag) {
        unsigned int num_entry_point_offsets_limit;
        if (!pps->tiles_enabled_flag && pps->entropy_coding_sync_enabled_flag)
            num_entry_point_offsets_limit = pic_height_in_ctbs_y - 1;
        else if (pps->tiles_enabled_flag && !pps->entropy_coding_sync_enabled_flag)
            num_entry_point_offsets_limit =
                (pps->num_tile_columns_minus1 + 1) * (pps->num_tile_rows_minus1 + 1);
        else
            num_entry_point_offsets_limit =
                (pps->num_tile_columns_minus1 + 1) * pic_height_in_ctbs_y - 1;
        ue(num_entry_point_offsets, 0, num_entry_point_offsets_limit);

        // The raw structure only has storage for a bounded number of
        // entry points; larger counts are valid but unsupported.
        if (current->num_entry_point_offsets > HEVC_MAX_ENTRY_POINT_OFFSETS) {
            av_log(ctx->log_ctx, AV_LOG_ERROR, "Too many entry points: "
                   "%"PRIu16".\n", current->num_entry_point_offsets);
            return AVERROR_PATCHWELCOME;
        }

        if (current->num_entry_point_offsets > 0) {
            ue(offset_len_minus1, 0, 31);
            for (i = 0; i < current->num_entry_point_offsets; i++)
                u(current->offset_len_minus1 + 1, entry_point_offset_minus1[i],
                  0, (1 << (current->offset_len_minus1 + 1)) - 1);
        }
    }

    if (pps->slice_segment_header_extension_present_flag) {
        ue(slice_segment_header_extension_length, 0, 256);
        for (i = 0; i < current->slice_segment_header_extension_length; i++)
            u(8, slice_segment_header_extension_data_byte[i], 0x00, 0xff);
    }

    // The slice header ends byte-aligned; slice data follows.
    CHECK(FUNC(byte_alignment)(ctx, rw));

    return 0;
}