/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVUTIL_INTREADWRITE_H
#define AVUTIL_INTREADWRITE_H

#include <stdint.h>
#include "config.h"
#include "bswap.h"

/*
 * Arch-specific headers can provide any combination of
 * AV_[RW][BLN](16|24|32|64) and AV_(COPY|SWAP|ZERO)(64|128) macros.
 * Preprocessor symbols must be defined, even if these are implemented
 * as inline functions.
 */

#if ARCH_ARM
# include "arm/intreadwrite.h"
#elif ARCH_AVR32
# include "avr32/intreadwrite.h"
#elif ARCH_MIPS
# include "mips/intreadwrite.h"
#elif ARCH_PPC
# include "ppc/intreadwrite.h"
#elif ARCH_X86
# include "x86/intreadwrite.h"
#endif
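
/*
 * Illustrative sketch only (not part of this header): an arch header
 * included above typically maps the native-endian accessors onto whatever
 * fast unaligned access the architecture offers, e.g. a hypothetical
 *
 *     #define AV_RN32(p) my_arch_unaligned_load32(p)
 *
 * where my_arch_unaligned_load32() is a made-up name standing in for an
 * inline-asm or intrinsic helper. Whatever the arch header defines is then
 * reused by the generic fallbacks below instead of being redefined.
 */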

/*
 * Map AV_RNXX <-> AV_R[BL]XX for all variants provided by per-arch headers.
 */

#if HAVE_BIGENDIAN

# if defined(AV_RN16) && !defined(AV_RB16)
#  define AV_RB16(p) AV_RN16(p)
# elif !defined(AV_RN16) && defined(AV_RB16)
#  define AV_RN16(p) AV_RB16(p)
# endif

# if defined(AV_WN16) && !defined(AV_WB16)
#  define AV_WB16(p, v) AV_WN16(p, v)
# elif !defined(AV_WN16) && defined(AV_WB16)
#  define AV_WN16(p, v) AV_WB16(p, v)
# endif

# if defined(AV_RN24) && !defined(AV_RB24)
#  define AV_RB24(p) AV_RN24(p)
# elif !defined(AV_RN24) && defined(AV_RB24)
#  define AV_RN24(p) AV_RB24(p)
# endif

# if defined(AV_WN24) && !defined(AV_WB24)
#  define AV_WB24(p, v) AV_WN24(p, v)
# elif !defined(AV_WN24) && defined(AV_WB24)
#  define AV_WN24(p, v) AV_WB24(p, v)
# endif

# if defined(AV_RN32) && !defined(AV_RB32)
#  define AV_RB32(p) AV_RN32(p)
# elif !defined(AV_RN32) && defined(AV_RB32)
#  define AV_RN32(p) AV_RB32(p)
# endif

# if defined(AV_WN32) && !defined(AV_WB32)
#  define AV_WB32(p, v) AV_WN32(p, v)
# elif !defined(AV_WN32) && defined(AV_WB32)
#  define AV_WN32(p, v) AV_WB32(p, v)
# endif

# if defined(AV_RN64) && !defined(AV_RB64)
#  define AV_RB64(p) AV_RN64(p)
# elif !defined(AV_RN64) && defined(AV_RB64)
#  define AV_RN64(p) AV_RB64(p)
# endif

# if defined(AV_WN64) && !defined(AV_WB64)
#  define AV_WB64(p, v) AV_WN64(p, v)
# elif !defined(AV_WN64) && defined(AV_WB64)
#  define AV_WN64(p, v) AV_WB64(p, v)
# endif

#else /* HAVE_BIGENDIAN */

# if defined(AV_RN16) && !defined(AV_RL16)
#  define AV_RL16(p) AV_RN16(p)
# elif !defined(AV_RN16) && defined(AV_RL16)
#  define AV_RN16(p) AV_RL16(p)
# endif

# if defined(AV_WN16) && !defined(AV_WL16)
#  define AV_WL16(p, v) AV_WN16(p, v)
# elif !defined(AV_WN16) && defined(AV_WL16)
#  define AV_WN16(p, v) AV_WL16(p, v)
# endif

# if defined(AV_RN24) && !defined(AV_RL24)
#  define AV_RL24(p) AV_RN24(p)
# elif !defined(AV_RN24) && defined(AV_RL24)
#  define AV_RN24(p) AV_RL24(p)
# endif

# if defined(AV_WN24) && !defined(AV_WL24)
#  define AV_WL24(p, v) AV_WN24(p, v)
# elif !defined(AV_WN24) && defined(AV_WL24)
#  define AV_WN24(p, v) AV_WL24(p, v)
# endif

# if defined(AV_RN32) && !defined(AV_RL32)
#  define AV_RL32(p) AV_RN32(p)
# elif !defined(AV_RN32) && defined(AV_RL32)
#  define AV_RN32(p) AV_RL32(p)
# endif

# if defined(AV_WN32) && !defined(AV_WL32)
#  define AV_WL32(p, v) AV_WN32(p, v)
# elif !defined(AV_WN32) && defined(AV_WL32)
#  define AV_WN32(p, v) AV_WL32(p, v)
# endif

# if defined(AV_RN64) && !defined(AV_RL64)
#  define AV_RL64(p) AV_RN64(p)
# elif !defined(AV_RN64) && defined(AV_RL64)
#  define AV_RN64(p) AV_RL64(p)
# endif

# if defined(AV_WN64) && !defined(AV_WL64)
#  define AV_WL64(p, v) AV_WN64(p, v)
# elif !defined(AV_WN64) && defined(AV_WL64)
#  define AV_WN64(p, v) AV_WL64(p, v)
# endif

#endif /* !HAVE_BIGENDIAN */
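
/*
 * Mapping sketch (assuming a little-endian build whose arch header defines
 * only AV_RN32): the #else branch above turns AV_RL32 into an alias for
 * AV_RN32, so both calls below expand to the same arch-provided read:
 *
 *     uint32_t a = AV_RN32(buf);   // native-endian read
 *     uint32_t b = AV_RL32(buf);   // little-endian read, same macro here
 *
 * buf is an arbitrary byte pointer used only for illustration.
 */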

/*
 * Define AV_[RW]N helper macros to simplify definitions not provided
 * by per-arch headers.
 */

#if HAVE_ATTRIBUTE_PACKED

struct unaligned_64 { uint64_t l; } __attribute__((packed));
struct unaligned_32 { uint32_t l; } __attribute__((packed));
struct unaligned_16 { uint16_t l; } __attribute__((packed));

# define AV_RN(s, p) (((const struct unaligned_##s *) (p))->l)
# define AV_WN(s, p, v) ((((struct unaligned_##s *) (p))->l) = (v))

#elif defined(__DECC)

# define AV_RN(s, p) (*((const __unaligned uint##s##_t*)(p)))
# define AV_WN(s, p, v) (*((__unaligned uint##s##_t*)(p)) = (v))

#elif HAVE_FAST_UNALIGNED

# define AV_RN(s, p) (*((const uint##s##_t*)(p)))
# define AV_WN(s, p, v) (*((uint##s##_t*)(p)) = (v))

#else

#ifndef AV_RB16
# define AV_RB16(x) \
    ((((const uint8_t*)(x))[0] << 8) | \
      ((const uint8_t*)(x))[1])
#endif
#ifndef AV_WB16
# define AV_WB16(p, d) do { \
        ((uint8_t*)(p))[1] = (d); \
        ((uint8_t*)(p))[0] = (d)>>8; \
    } while(0)
#endif

#ifndef AV_RL16
# define AV_RL16(x) \
    ((((const uint8_t*)(x))[1] << 8) | \
      ((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL16
# define AV_WL16(p, d) do { \
        ((uint8_t*)(p))[0] = (d); \
        ((uint8_t*)(p))[1] = (d)>>8; \
    } while(0)
#endif

#ifndef AV_RB32
# define AV_RB32(x) \
    ((((const uint8_t*)(x))[0] << 24) | \
     (((const uint8_t*)(x))[1] << 16) | \
     (((const uint8_t*)(x))[2] << 8) | \
      ((const uint8_t*)(x))[3])
#endif
#ifndef AV_WB32
# define AV_WB32(p, d) do { \
        ((uint8_t*)(p))[3] = (d); \
        ((uint8_t*)(p))[2] = (d)>>8; \
        ((uint8_t*)(p))[1] = (d)>>16; \
        ((uint8_t*)(p))[0] = (d)>>24; \
    } while(0)
#endif

#ifndef AV_RL32
# define AV_RL32(x) \
    ((((const uint8_t*)(x))[3] << 24) | \
     (((const uint8_t*)(x))[2] << 16) | \
     (((const uint8_t*)(x))[1] << 8) | \
      ((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL32
# define AV_WL32(p, d) do { \
        ((uint8_t*)(p))[0] = (d); \
        ((uint8_t*)(p))[1] = (d)>>8; \
        ((uint8_t*)(p))[2] = (d)>>16; \
        ((uint8_t*)(p))[3] = (d)>>24; \
    } while(0)
#endif

#ifndef AV_RB64
# define AV_RB64(x) \
    (((uint64_t)((const uint8_t*)(x))[0] << 56) | \
     ((uint64_t)((const uint8_t*)(x))[1] << 48) | \
     ((uint64_t)((const uint8_t*)(x))[2] << 40) | \
     ((uint64_t)((const uint8_t*)(x))[3] << 32) | \
     ((uint64_t)((const uint8_t*)(x))[4] << 24) | \
     ((uint64_t)((const uint8_t*)(x))[5] << 16) | \
     ((uint64_t)((const uint8_t*)(x))[6] << 8) | \
      (uint64_t)((const uint8_t*)(x))[7])
#endif
#ifndef AV_WB64
# define AV_WB64(p, d) do { \
        ((uint8_t*)(p))[7] = (d); \
        ((uint8_t*)(p))[6] = (d)>>8; \
        ((uint8_t*)(p))[5] = (d)>>16; \
        ((uint8_t*)(p))[4] = (d)>>24; \
        ((uint8_t*)(p))[3] = (d)>>32; \
        ((uint8_t*)(p))[2] = (d)>>40; \
        ((uint8_t*)(p))[1] = (d)>>48; \
        ((uint8_t*)(p))[0] = (d)>>56; \
    } while(0)
#endif

#ifndef AV_RL64
# define AV_RL64(x) \
    (((uint64_t)((const uint8_t*)(x))[7] << 56) | \
     ((uint64_t)((const uint8_t*)(x))[6] << 48) | \
     ((uint64_t)((const uint8_t*)(x))[5] << 40) | \
     ((uint64_t)((const uint8_t*)(x))[4] << 32) | \
     ((uint64_t)((const uint8_t*)(x))[3] << 24) | \
     ((uint64_t)((const uint8_t*)(x))[2] << 16) | \
     ((uint64_t)((const uint8_t*)(x))[1] << 8) | \
      (uint64_t)((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL64
# define AV_WL64(p, d) do { \
        ((uint8_t*)(p))[0] = (d); \
        ((uint8_t*)(p))[1] = (d)>>8; \
        ((uint8_t*)(p))[2] = (d)>>16; \
        ((uint8_t*)(p))[3] = (d)>>24; \
        ((uint8_t*)(p))[4] = (d)>>32; \
        ((uint8_t*)(p))[5] = (d)>>40; \
        ((uint8_t*)(p))[6] = (d)>>48; \
        ((uint8_t*)(p))[7] = (d)>>56; \
    } while(0)
#endif

#if HAVE_BIGENDIAN
# define AV_RN(s, p) AV_RB##s(p)
# define AV_WN(s, p, v) AV_WB##s(p, v)
#else
# define AV_RN(s, p) AV_RL##s(p)
# define AV_WN(s, p, v) AV_WL##s(p, v)
#endif

#endif /* HAVE_FAST_UNALIGNED */
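
/*
 * Usage sketch for the AV_[RW]N helpers defined above (illustrative, with a
 * made-up buffer): on the HAVE_FAST_UNALIGNED path AV_RN(32, p) is a plain
 * dereference, on the packed-struct path it goes through struct
 * unaligned_32, and on the generic path it falls back to the byte-wise
 * AV_R[BL]32 macros. The caller-visible behaviour is the same:
 *
 *     uint8_t buf[4];
 *     AV_WN(32, buf, 0x01020304u);      // native-endian store
 *     uint32_t v = AV_RN(32, buf);      // v == 0x01020304
 */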

#ifndef AV_RN16
# define AV_RN16(p) AV_RN(16, p)
#endif

#ifndef AV_RN32
# define AV_RN32(p) AV_RN(32, p)
#endif

#ifndef AV_RN64
# define AV_RN64(p) AV_RN(64, p)
#endif

#ifndef AV_WN16
# define AV_WN16(p, v) AV_WN(16, p, v)
#endif

#ifndef AV_WN32
# define AV_WN32(p, v) AV_WN(32, p, v)
#endif

#ifndef AV_WN64
# define AV_WN64(p, v) AV_WN(64, p, v)
#endif

#if HAVE_BIGENDIAN
# define AV_RB(s, p) AV_RN##s(p)
# define AV_WB(s, p, v) AV_WN##s(p, v)
# define AV_RL(s, p) bswap_##s(AV_RN##s(p))
# define AV_WL(s, p, v) AV_WN##s(p, bswap_##s(v))
#else
# define AV_RB(s, p) bswap_##s(AV_RN##s(p))
# define AV_WB(s, p, v) AV_WN##s(p, bswap_##s(v))
# define AV_RL(s, p) AV_RN##s(p)
# define AV_WL(s, p, v) AV_WN##s(p, v)
#endif
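
/*
 * How the AV_R[BL]/AV_W[BL] helpers above resolve, as a sketch for a
 * little-endian build: AV_RB(32, p) becomes bswap_32(AV_RN32(p)), i.e. a
 * native read followed by a byte swap, while AV_RL(32, p) is the native
 * read itself; a big-endian build swaps the roles. These helpers are only
 * used below for sizes the arch header did not already provide.
 */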

#define AV_RB8(x) (((const uint8_t*)(x))[0])
#define AV_WB8(p, d) do { ((uint8_t*)(p))[0] = (d); } while(0)

#define AV_RL8(x) AV_RB8(x)
#define AV_WL8(p, d) AV_WB8(p, d)

#ifndef AV_RB16
# define AV_RB16(p) AV_RB(16, p)
#endif
#ifndef AV_WB16
# define AV_WB16(p, v) AV_WB(16, p, v)
#endif

#ifndef AV_RL16
# define AV_RL16(p) AV_RL(16, p)
#endif
#ifndef AV_WL16
# define AV_WL16(p, v) AV_WL(16, p, v)
#endif

#ifndef AV_RB32
# define AV_RB32(p) AV_RB(32, p)
#endif
#ifndef AV_WB32
# define AV_WB32(p, v) AV_WB(32, p, v)
#endif

#ifndef AV_RL32
# define AV_RL32(p) AV_RL(32, p)
#endif
#ifndef AV_WL32
# define AV_WL32(p, v) AV_WL(32, p, v)
#endif

#ifndef AV_RB64
# define AV_RB64(p) AV_RB(64, p)
#endif
#ifndef AV_WB64
# define AV_WB64(p, v) AV_WB(64, p, v)
#endif

#ifndef AV_RL64
# define AV_RL64(p) AV_RL(64, p)
#endif
#ifndef AV_WL64
# define AV_WL64(p, v) AV_WL(64, p, v)
#endif

#ifndef AV_RB24
# define AV_RB24(x) \
    ((((const uint8_t*)(x))[0] << 16) | \
     (((const uint8_t*)(x))[1] << 8) | \
      ((const uint8_t*)(x))[2])
#endif
#ifndef AV_WB24
# define AV_WB24(p, d) do { \
        ((uint8_t*)(p))[2] = (d); \
        ((uint8_t*)(p))[1] = (d)>>8; \
        ((uint8_t*)(p))[0] = (d)>>16; \
    } while(0)
#endif

#ifndef AV_RL24
# define AV_RL24(x) \
    ((((const uint8_t*)(x))[2] << 16) | \
     (((const uint8_t*)(x))[1] << 8) | \
      ((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL24
# define AV_WL24(p, d) do { \
        ((uint8_t*)(p))[0] = (d); \
        ((uint8_t*)(p))[1] = (d)>>8; \
        ((uint8_t*)(p))[2] = (d)>>16; \
    } while(0)
#endif
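
/*
 * Usage sketch for the explicit-endianness accessors (made-up buffer and
 * values, shown only to illustrate the byte order):
 *
 *     uint8_t buf[4] = { 0x12, 0x34, 0x56, 0x78 };
 *     // AV_RB32(buf) == 0x12345678, AV_RL32(buf) == 0x78563412
 *     // AV_RB24(buf) == 0x123456,   AV_RL16(buf) == 0x3412
 *     AV_WB16(buf, 0xabcd);          // stores bytes 0xab, 0xcd
 *     AV_WL16(buf, 0xabcd);          // stores bytes 0xcd, 0xab
 */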

/* Parameters for AV_COPY*, AV_SWAP*, AV_ZERO* must be
 * naturally aligned. They may be implemented using MMX,
 * so emms_c() must be called before using any float code
 * afterwards.
 */

#define AV_COPY(n, d, s) (*(uint##n##_t*)(d) = *(const uint##n##_t*)(s))

#ifndef AV_COPY32
# define AV_COPY32(d, s) AV_COPY(32, d, s)
#endif

#ifndef AV_COPY64
# define AV_COPY64(d, s) AV_COPY(64, d, s)
#endif

#ifndef AV_COPY128
# define AV_COPY128(d, s) \
    do { \
        AV_COPY64(d, s); \
        AV_COPY64((char*)(d)+8, (char*)(s)+8); \
    } while(0)
#endif

#define AV_SWAP(n, a, b) FFSWAP(uint##n##_t, *(uint##n##_t*)(a), *(uint##n##_t*)(b))

#ifndef AV_SWAP64
# define AV_SWAP64(a, b) AV_SWAP(64, a, b)
#endif

#define AV_ZERO(n, d) (*(uint##n##_t*)(d) = 0)

#ifndef AV_ZERO64
# define AV_ZERO64(d) AV_ZERO(64, d)
#endif

#ifndef AV_ZERO128
# define AV_ZERO128(d) \
    do { \
        AV_ZERO64(d); \
        AV_ZERO64((char*)(d)+8); \
    } while(0)
#endif
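
/*
 * Usage sketch for the block helpers (a minimal example; DECLARE_ALIGNED is
 * the alignment macro from libavutil/mem.h, used here only to satisfy the
 * natural-alignment requirement stated above):
 *
 *     DECLARE_ALIGNED(16, uint8_t, src)[16];
 *     DECLARE_ALIGNED(16, uint8_t, dst)[16];
 *     AV_ZERO128(dst);                 // clear 16 bytes
 *     AV_COPY64(dst, src);             // copy the first 8 bytes
 *     // if an arch implements these with MMX, call emms_c() before
 *     // running any float code afterwards
 */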

#endif /* AVUTIL_INTREADWRITE_H */