Go to the documentation of this file.
26 #include <aom/aom_decoder.h>
27 #include <aom/aomdx.h>
47 const struct aom_codec_iface *iface)
50 struct aom_codec_dec_cfg deccfg = {
57 if (aom_codec_dec_init(&
ctx->decoder, iface, &deccfg, 0) != AOM_CODEC_OK) {
58 const char *
error = aom_codec_error(&
ctx->decoder);
79 case AOM_IMG_FMT_I420:
80 case AOM_IMG_FMT_I42016:
81 if (
img->bit_depth == 8) {
86 }
else if (
img->bit_depth == 10) {
91 }
else if (
img->bit_depth == 12) {
99 case AOM_IMG_FMT_I422:
100 case AOM_IMG_FMT_I42216:
101 if (
img->bit_depth == 8) {
105 }
else if (
img->bit_depth == 10) {
109 }
else if (
img->bit_depth == 12) {
116 case AOM_IMG_FMT_I444:
117 case AOM_IMG_FMT_I44416:
118 if (
img->bit_depth == 8) {
123 }
else if (
img->bit_depth == 10) {
129 }
else if (
img->bit_depth == 12) {
144 const uint8_t *
buffer,
size_t buffer_size)
152 const int country_code = bytestream2_get_byteu(&bc);
153 const int provider_code = bytestream2_get_be16u(&bc);
154 const int provider_oriented_code = bytestream2_get_be16u(&bc);
155 const int application_identifier = bytestream2_get_byteu(&bc);
160 && provider_oriented_code == 0x0001
161 && application_identifier == 0x04) {
178 const size_t num_metadata = aom_img_num_metadata(
img);
179 for (
size_t i = 0;
i < num_metadata; ++
i) {
180 const aom_metadata_t *
metadata = aom_img_get_metadata(
img,
i);
185 case OBU_METADATA_TYPE_ITUT_T35: {
202 const void *iter =
NULL;
203 struct aom_image *
img;
208 const char *
error = aom_codec_error(&
ctx->decoder);
209 const char *detail = aom_codec_error_detail(&
ctx->decoder);
218 if ((
img = aom_codec_get_frame(&
ctx->decoder, &iter))) {
227 img->fmt,
img->bit_depth);
241 #ifdef AOM_CTRL_AOMD_GET_FRAME_FLAGS
243 aom_codec_frame_flags_t
flags;
244 ret = aom_codec_control(&
ctx->decoder, AOMD_GET_FRAME_FLAGS, &
flags);
245 if (
ret == AOM_CODEC_OK) {
246 if (
flags & AOM_FRAME_IS_KEY)
250 if (
flags & (AOM_FRAME_IS_KEY | AOM_FRAME_IS_INTRAONLY))
252 else if (
flags & AOM_FRAME_IS_SWITCH)
267 if ((
img->fmt & AOM_IMG_FMT_HIGHBITDEPTH) &&
img->bit_depth == 8)
270 const uint8_t *
planes[4] = {
img->planes[0],
img->planes[1],
img->planes[2] };
271 const int stride[4] = {
img->stride[0],
img->stride[1],
img->stride[2] };
289 aom_codec_destroy(&
ctx->decoder);
295 return aom_init(avctx, aom_codec_av1_dx());
299 .
p.
name =
"libaom-av1",
311 .p.wrapper_name =
"libaom",
static void error(const char *err)
static av_cold int aom_free(AVCodecContext *avctx)
static av_cold int aom_init(AVCodecContext *avctx, const struct aom_codec_iface *iface)
Filter the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
static av_always_inline int bytestream2_get_bytes_left(const GetByteContext *g)
enum AVColorSpace colorspace
YUV colorspace type.
This structure describes decoded (raw) audio or video data.
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
@ AVCOL_RANGE_JPEG
Full range content.
static int set_pix_fmt(AVCodecContext *avctx, struct aom_image *img)
#define AV_PIX_FMT_YUV420P10
#define FF_CODEC_CAP_NOT_INIT_THREADSAFE
The codec is not known to be init-threadsafe (i.e. it might be unsafe to initialize this codec and another codec concurrently, typically because the codec calls external APIs that are not known to be thread-safe).
#define AV_LOG_VERBOSE
Detailed information.
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
#define AV_PROFILE_AV1_PROFESSIONAL
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static av_cold void close(AVCodecParserContext *s)
AVCodec p
The public AVCodec.
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
#define AV_PIX_FMT_GBRP10
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
#define AV_PIX_FMT_YUV444P10
struct aom_codec_ctx decoder
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
#define FF_CODEC_DECODE_CB(func)
#define AV_CODEC_CAP_OTHER_THREADS
Codec supports multithreading through a method other than slice- or frame-level multithreading.
const AVProfile ff_av1_profiles[]
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
#define CODEC_LONG_NAME(str)
#define AV_PIX_FMT_GRAY10
enum AVColorRange color_range
MPEG vs JPEG YUV range.
const FFCodec ff_libaom_av1_decoder
@ AV_PICTURE_TYPE_I
Intra.
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
#define AV_PIX_FMT_YUV422P10
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
@ AV_PICTURE_TYPE_SP
Switching Predicted.
enum AVPictureType pict_type
Picture type of the frame.
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
int(* init)(AVBSFContext *ctx)
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
static av_cold int av1_init(AVCodecContext *avctx)
#define AV_PIX_FMT_YUV422P12
static const struct @522 planes[]
#define AV_PIX_FMT_YUV444P12
static int aom_decode(AVCodecContext *avctx, AVFrame *picture, int *got_frame, AVPacket *avpkt)
#define AVERROR_EXTERNAL
Generic error in an external library.
#define AV_PROFILE_AV1_HIGH
#define AV_LOG_INFO
Standard information.
#define i(width, name, range_min, range_max)
#define AV_PIX_FMT_GBRP12
const char * name
Name of the codec implementation.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
This struct represents dynamic metadata for color volume transform - application 4 of SMPTE 2094-40:2016 standard.
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
#define AV_PIX_FMT_YUV420P12
main external API structure.
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
@ AV_PICTURE_TYPE_P
Predicted.
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
#define FF_CODEC_CAP_AUTO_THREADS
Codec handles avctx->thread_count == 0 (auto) internally.
#define ITU_T_T35_COUNTRY_CODE_US
void ff_aom_image_copy_16_to_8(AVFrame *pic, struct aom_image *img)
This structure stores compressed data.
static int decode_metadata_itu_t_t35(AVFrame *frame, const uint8_t *buffer, size_t buffer_size)
int width
picture width / height.
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each picture line.
#define ITU_T_T35_PROVIDER_CODE_SAMSUNG
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
#define AV_PIX_FMT_GRAY12
void av_image_copy(uint8_t *const dst_data[4], const int dst_linesizes[4], const uint8_t *const src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Copy image in src_data to dst_data.
#define AV_PROFILE_AV1_MAIN
AVColorRange
Visual content value range.
static int decode_metadata(AVFrame *frame, const struct aom_image *img)