80 #if CONFIG_VP8_DECODER
123 for (i = 0; i < 5; i++)
210 for (i = 0; i < 4; i++)
213 for (i = 0; i < 4; i++)
217 for (i = 0; i < 3; i++)
226 for (i = 0; i < 4; i++) {
259 if (buf_size - size < 0)
302 for (i = 0; i < 4; i++) {
355 for (i = 0; i < 4; i++)
356 for (j = 0; j < 16; j++)
366 for (i = 0; i < 4; i++)
367 for (j = 0; j < 8; j++)
368 for (k = 0; k < 3; k++)
377 #define VP7_MVC_SIZE 17
378 #define VP8_MVC_SIZE 19
387 for (i = 0; i < 4; i++)
390 for (i = 0; i < 3; i++)
394 for (i = 0; i < 2; i++)
395 for (j = 0; j < mvc_size; j++)
415 for (j = 1; j < 3; j++) {
416 for (i = 0; i < height / 2; i++)
428 for (j = 0; j < height; j++) {
429 for (i = 0; i < width; i++) {
430 uint8_t y = src[j * linesize + i];
431 dst[j * linesize + i] = av_clip_uint8(y + ((y * beta) >> 8) + alpha);
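The two loops above (lines 428-431) implement the VP7 fade: each luma sample is scaled by beta/256, offset by alpha, and clipped back to 8 bits. A minimal standalone restatement of the per-pixel arithmetic, with the clip written out instead of av_clip_uint8 (illustrative only, not part of vp8.c):

#include <stdint.h>

/* Same arithmetic as line 431 above. */
static uint8_t fade_pixel(uint8_t y, int alpha, int beta)
{
    int v = y + ((y * beta) >> 8) + alpha;
    if (v < 0)   v = 0;
    if (v > 255) v = 255;
    return (uint8_t)v;
}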
442 if (!s->keyframe && (alpha || beta)) {
465 width, height, dst->linesize[0], alpha, beta);
474 int part1_size, hscale, vscale, i, j, ret;
482 s->profile = (buf[0] >> 1) & 7;
490 part1_size = AV_RL24(buf) >> 4;
495 if (buf_size < part1_size) {
503 buf_size -= part1_size;
511 if (hscale || vscale)
520 for (i = 0; i < 2; i++)
532 for (i = 0; i < 4; i++) {
537 for (j = 0; j < 3; j++)
542 for (j = 0; j < 4; j++)
596 for (i = 1; i < 16; i++)
623 int header_size, hscale, vscale, ret;
630 header_size = AV_RL24(buf) >> 5;
644 if (header_size > buf_size - 7 * s->keyframe) {
650 if (AV_RL24(buf) != 0x2a019d) {
652 "Invalid start code 0x%x\n", AV_RL24(buf));
655 width = AV_RL16(buf + 3) & 0x3fff;
656 height = AV_RL16(buf + 5) & 0x3fff;
657 hscale = buf[4] >> 6;
658 vscale = buf[6] >> 6;
662 if (hscale || vscale)
679 buf_size -= header_size;
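Lines 650-658 read the fixed portion of a VP8 keyframe header: a 3-byte start code (0x9d 0x01 0x2a, which reads as 0x2a019d little-endian), then two little-endian 16-bit words whose low 14 bits hold the width and height and whose top 2 bits hold the horizontal and vertical scaling codes. A hedged standalone sketch of that extraction with the AV_RL16/AV_RL24 reads written out by hand (the function name and error convention here are illustrative, not FFmpeg API):

#include <stdint.h>

/* buf points at the start code; the caller must guarantee at least 7 bytes. */
static int parse_vp8_keyframe_dims(const uint8_t *buf,
                                   int *width, int *height,
                                   int *hscale, int *vscale)
{
    uint32_t start = buf[0] | (buf[1] << 8) | ((uint32_t)buf[2] << 16); /* AV_RL24 */
    if (start != 0x2a019d)
        return -1;                                /* invalid start code */
    *width  = (buf[3] | (buf[4] << 8)) & 0x3fff;  /* AV_RL16(buf + 3) */
    *height = (buf[5] | (buf[6] << 8)) & 0x3fff;  /* AV_RL16(buf + 5) */
    *hscale = buf[4] >> 6;
    *vscale = buf[6] >> 6;
    return 0;
}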
757 for (i = 0; i < 3; i++)
759 for (i = (vp7 ? 7 : 9); i > 3; i--)
804 const uint8_t *mbsplits_top, *mbsplits_cur, *firstidx;
814 top_mv = top_mb->bmv;
830 for (n = 0; n < num; n++) {
832 uint32_t left, above;
836 left = AV_RN32A(&left_mv[mbsplits_left[k + 3]]);
838 left = AV_RN32A(&cur_mv[mbsplits_cur[k - 1]]);
840 above = AV_RN32A(&top_mv[mbsplits_top[k + 12]]);
842 above = AV_RN32A(&cur_mv[mbsplits_cur[k - 4]]);
879 int xoffset, int yoffset, int boundary,
880 int *edge_x, int *edge_y)
882 int vwidth = mb_width + 1;
883 int new = (mb_y + yoffset) * vwidth + mb_x + xoffset;
884 if (new < boundary || new % vwidth == vwidth - 1)
886 *edge_y = new / vwidth;
887 *edge_x = new % vwidth;
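vp7_calculate_mb_offset (lines 879-887) maps a predictor offset onto a macroblock grid that is one padding column wider than the frame (see the note on the VP7 reference decoder in the index below); candidates that land before the first decodable macroblock or in the padding column are rejected. A self-contained restatement of that index arithmetic, with a 1/0 return convention assumed here for illustration:

/* Returns 1 and fills edge_x/edge_y if the candidate macroblock is usable. */
static int vp7_mb_offset(int mb_x, int mb_y, int mb_width,
                         int xoffset, int yoffset, int boundary,
                         int *edge_x, int *edge_y)
{
    int vwidth = mb_width + 1;                    /* +1 for the padding column */
    int idx    = (mb_y + yoffset) * vwidth + mb_x + xoffset;

    if (idx < boundary || idx % vwidth == vwidth - 1)
        return 0;                                 /* out of range or in padding */
    *edge_y = idx / vwidth;
    *edge_x = idx % vwidth;
    return 1;
}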
898 int mb_x, int mb_y, int layout)
901 enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR };
902 enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
926 if (AV_RN32A(&near_mv[CNT_NEAREST])) {
927 if (mv == AV_RN32A(&near_mv[CNT_NEAREST])) {
929 } else if (AV_RN32A(&near_mv[CNT_NEAR])) {
930 if (mv != AV_RN32A(&near_mv[CNT_NEAR]))
938 AV_WN32A(&near_mv[CNT_NEAREST], mv);
959 if (cnt[CNT_NEAREST] > cnt[CNT_NEAR])
960 AV_WN32A(&mb->mv, cnt[CNT_ZERO] > cnt[CNT_NEAREST] ? 0 : AV_RN32A(&near_mv[CNT_NEAREST]));
973 mb->mv = near_mv[CNT_NEAR];
977 mb->mv = near_mv[CNT_NEAREST];
989 int mb_x, int mb_y, int layout)
994 enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
995 enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
1004 mb_edge[0] = mb + 2;
1005 mb_edge[2] = mb + 1;
1016 #define MV_EDGE_CHECK(n) \
1018 VP8Macroblock *edge = mb_edge[n]; \
1019 int edge_ref = edge->ref_frame; \
1020 if (edge_ref != VP56_FRAME_CURRENT) { \
1021 uint32_t mv = AV_RN32A(&edge->mv); \
1023 if (cur_sign_bias != sign_bias[edge_ref]) { \
1026 mv = ((mv & 0x7fff7fff) + \
1027 0x00010001) ^ (mv & 0x80008000); \
1029 if (!n || mv != AV_RN32A(&near_mv[idx])) \
1030 AV_WN32A(&near_mv[++idx], mv); \
1031 cnt[idx] += 1 + (n != 2); \
1033 cnt[CNT_ZERO] += 1 + (n != 2); \
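Within MV_EDGE_CHECK a neighbour's motion vector is handled as a single 32-bit word containing two packed int16 components. When the two reference frames disagree on sign bias, the add/xor at lines 1026-1027 is the carry-safe "+1" half of a SWAR two's-complement negate; the bitwise complement that supplies the "~x" half lies just outside this excerpt. A small standalone version of the full pattern:

#include <stdint.h>

/* Negate both int16 lanes packed in one uint32_t without unpacking them:
 * -x == ~x + 1, with the "+1" done per lane so no carry crosses lanes. */
static uint32_t negate_packed_mv(uint32_t mv)
{
    mv = ~mv;
    return ((mv & 0x7fff7fff) + 0x00010001) ^ (mv & 0x80008000);
}
/* e.g. lanes holding (3, -7) come back as (-3, 7). */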
1046 if (cnt[CNT_SPLITMV] &&
1047 AV_RN32A(&near_mv[1 + VP8_EDGE_TOP]) == AV_RN32A(&near_mv[1 + VP8_EDGE_TOPLEFT]))
1048 cnt[CNT_NEAREST] += 1;
1051 if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
1053 FFSWAP(VP56mv, near_mv[CNT_NEAREST], near_mv[CNT_NEAR]);
1059 clamp_mv(s, &mb->mv, &near_mv[CNT_ZERO + (cnt[CNT_NEAREST] >= cnt[CNT_ZERO])]);
1070 mb->bmv[0] = mb->mv;
1074 mb->bmv[0] = mb->mv;
1078 mb->bmv[0] = mb->mv;
1083 mb->bmv[0] = mb->mv;
1089 int mb_x, int keyframe, int layout)
1105 for (y = 0; y < 4; y++) {
1106 for (x = 0; x < 4; x++) {
1110 left[y] = top[x] = *intra4x4;
1116 for (i = 0; i < 16; i++)
1127 const char *vp7_feature_name[] = { "q-index",
1129 "partial-golden-update",
1134 for (i = 0; i < 4; i++) {
1140 "Feature %s present in macroblock (value 0x%x)\n",
1148 *segment = ref ? *ref : *segment;
1215 int i, uint8_t *token_prob, int16_t qmul[2],
1216 const uint8_t scan[16], int vp7)
1230 token_prob = probs[i][0];
1238 token_prob = probs[i + 1][1];
1258 int cat = (a << 1) + b;
1259 coeff = 3 + (8 << cat);
1263 token_prob = probs[i + 1][2];
1265 block[scan[i]] = (vp8_rac_get(&c) ? -coeff : coeff) * qmul[!!i];
1275 int16_t dc = block[0];
1284 block[0] = pred[0] = dc;
1289 block[0] = pred[0] = dc;
1303 token_prob, qmul, scan, IS_VP7);
1306 #ifndef vp8_decode_block_coeffs_internal
1333 int i, int zero_nhood, int16_t qmul[2],
1334 const uint8_t scan[16], int vp7)
1336 uint8_t *token_prob = probs[i][zero_nhood];
1340 token_prob, qmul, scan)
1350 int i, x, y, luma_start = 0, luma_ctx = 3;
1351 int nnz_pred, nnz, nnz_total = 0;
1356 nnz_pred = t_nnz[8] + l_nnz[8];
1362 l_nnz[8] = t_nnz[8] = !!nnz;
1382 for (y = 0; y < 4; y++)
1383 for (x = 0; x < 4; x++) {
1384 nnz_pred = l_nnz[y] + t_nnz[x];
1387 luma_start, nnz_pred,
1393 t_nnz[x] = l_nnz[y] = !!nnz;
1400 for (i = 4; i < 6; i++)
1401 for (y = 0; y < 2; y++)
1402 for (x = 0; x < 2; x++) {
1403 nnz_pred = l_nnz[i + 2 * y] + t_nnz[i + 2 * x];
1409 t_nnz[i + 2 * x] = l_nnz[i + 2 * y] = !!nnz;
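Throughout decode_mb_coeffs the t_nnz/l_nnz arrays remember, per column and row, whether the block above or to the left had any non-zero coefficient; their sum (nnz_pred at lines 1356, 1384 and 1403) is the 0..2 "zero neighbourhood" context used to select the token probability set, and the flags are refreshed with !!nnz after each block. A deliberately small restatement of that context:

/* Context 0, 1 or 2: how many of the left/above blocks were non-zero.
 * Mirrors nnz_pred = l_nnz[...] + t_nnz[...] in the loops above. */
static int zero_nhood_ctx(int left_nonzero, int above_nonzero)
{
    return (left_nonzero != 0) + (above_nonzero != 0);
}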
1423 int linesize, int uvlinesize, int simple)
1425 AV_COPY128(top_border, src_y + 15 * linesize);
1427 AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
1428 AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
1434 uint8_t *src_cr, int linesize, int uvlinesize, int mb_x,
1435 int mb_y, int mb_width, int simple, int xchg)
1437 uint8_t *top_border_m1 = top_border - 32;
1439 src_cb -= uvlinesize;
1440 src_cr -= uvlinesize;
1442 #define XCHG(a, b, xchg) \
1450 XCHG(top_border_m1 + 8, src_y - 8, xchg);
1451 XCHG(top_border, src_y, xchg);
1452 XCHG(top_border + 8, src_y + 8, 1);
1453 if (mb_x < mb_width - 1)
1454 XCHG(top_border + 32, src_y + 16, 1);
1458 if (!simple || !mb_y) {
1459 XCHG(top_border_m1 + 16, src_cb - 8, xchg);
1460 XCHG(top_border_m1 + 24, src_cr - 8, xchg);
1461 XCHG(top_border + 16, src_cb, 1);
1462 XCHG(top_border + 24, src_cr, 1);
1512 int *copy_buf, int vp7)
1516 if (!mb_x && mb_y) {
1550 int x, y, mode, nnz;
1566 const uint8_t lo = is_vp7 ? 128 : 127;
1567 const uint8_t hi = is_vp7 ? 128 : 129;
1568 uint8_t tr_top[4] = { lo, lo, lo, lo };
1576 if (mb_y && mb_x == s->mb_width - 1) {
1577 tr = tr_right[-1] * 0x01010101u;
1584 for (y = 0; y < 4; y++) {
1586 for (x = 0; x < 4; x++) {
1591 if ((y == 0 || x == 3) && mb_y == 0) {
1594 topright = tr_right;
1597 mb_y + y, &copy, is_vp7);
1599 dst = copy_dst + 12;
1603 AV_WN32A(copy_dst + 4, lo * 0x01010101U);
1609 copy_dst[3] = ptr[4 * x - s->linesize - 1];
1618 copy_dst[11] = ptr[4 * x - 1];
1619 copy_dst[19] = ptr[4 * x + s->linesize - 1];
1620 copy_dst[27] = ptr[4 * x + s->linesize * 2 - 1];
1621 copy_dst[35] = ptr[4 * x + s->linesize * 3 - 1];
1624 s->hpc.pred4x4[mode](dst, topright, linesize);
1650 mb_x, mb_y, is_vp7);
1661 { 0, 1, 2, 1, 2, 1, 2, 1 },
1663 { 0, 3, 5, 3, 5, 3, 5, 3 },
1664 { 0, 2, 3, 2, 3, 2, 3, 2 },
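The subpel_idx table above (lines 1661-1664) is indexed by the eighth-pel phase of a motion vector component: row 0 is how many extra source pixels the interpolation filter needs before the block, row 1 the total number of extra pixels read, and row 2 the extra pixels needed after the block (row 0 + row 2 = row 1). The motion-compensation routines below use it to decide whether a filtered read would cross the frame edge and must go through emulated_edge_mc. A standalone restatement of that bounds test for one axis, mirroring lines 1704-1705:

#include <stdint.h>

/* frac: sub-pixel phase 0..7, pos: block position, size: block size,
 * total: frame dimension along this axis. Returns non-zero if the read
 * would leave the frame and edge emulation is required. */
static int needs_edge_emulation(int frac, int pos, int size, int total)
{
    static const uint8_t subpel_idx[3][8] = {
        { 0, 1, 2, 1, 2, 1, 2, 1 },   /* pixels needed before the block */
        { 0, 3, 5, 3, 5, 3, 5, 3 },   /* total extra pixels read        */
        { 0, 2, 3, 2, 3, 2, 3, 2 },   /* pixels needed after the block  */
    };
    return pos < subpel_idx[0][frac] ||
           pos >= total - size - subpel_idx[2][frac];
}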
1686 int x_off, int y_off, int block_w, int block_h,
1693 int src_linesize = linesize;
1695 int mx = (mv->x << 1) & 7, mx_idx = subpel_idx[0][mx];
1696 int my = (mv->y << 1) & 7, my_idx = subpel_idx[0][my];
1698 x_off += mv->x >> 2;
1699 y_off += mv->y >> 2;
1703 src += y_off * linesize + x_off;
1704 if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
1705 y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
1707 src - my_idx * linesize - mx_idx,
1709 block_w + subpel_idx[1][mx],
1710 block_h + subpel_idx[1][my],
1711 x_off - mx_idx, y_off - my_idx,
1716 mc_func[my_idx][mx_idx](dst, linesize, src, src_linesize, block_h, mx, my);
1719 mc_func[0][0](dst, linesize, src + y_off * linesize + x_off,
1720 linesize, block_h, 0, 0);
1744 int x_off, int y_off, int block_w, int block_h,
1751 int mx = mv->x & 7, mx_idx = subpel_idx[0][mx];
1752 int my = mv->y & 7, my_idx = subpel_idx[0][my];
1754 x_off += mv->x >> 3;
1755 y_off += mv->y >> 3;
1758 src1 += y_off * linesize + x_off;
1759 src2 += y_off * linesize + x_off;
1761 if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
1762 y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
1764 src1 - my_idx * linesize - mx_idx,
1766 block_w + subpel_idx[1][mx], block_h + subpel_idx[1][my],
1767 x_off - mx_idx, y_off - my_idx, width, height);
1769 mc_func[my_idx][mx_idx](dst1, linesize, src1, EDGE_EMU_LINESIZE, block_h, mx, my);
1772 src2 - my_idx * linesize - mx_idx,
1773 EDGE_EMU_LINESIZE, linesize,
1774 block_w + subpel_idx[1][mx], block_h + subpel_idx[1][my],
1775 x_off - mx_idx, y_off - my_idx, width, height);
1777 mc_func[my_idx][mx_idx](dst2, linesize, src2, EDGE_EMU_LINESIZE, block_h, mx, my);
1779 mc_func[my_idx][mx_idx](dst1, linesize, src1, linesize, block_h, mx, my);
1780 mc_func[my_idx][mx_idx](dst2, linesize, src2, linesize, block_h, mx, my);
1784 mc_func[0][0](dst1, linesize, src1 + y_off * linesize + x_off, linesize, block_h, 0, 0);
1785 mc_func[0][0](dst2, linesize, src2 + y_off * linesize + x_off, linesize, block_h, 0, 0);
1792 int bx_off, int by_off, int block_w, int block_h,
1799 ref_frame, mv, x_off + bx_off, y_off + by_off,
1800 block_w, block_h, width, height, s->linesize,
1819 dst[2] + by_off * s->uvlinesize + bx_off, ref_frame,
1820 &uvmv, x_off + bx_off, y_off + by_off,
1821 block_w, block_h, width, height, s->uvlinesize,
1832 if (s->ref_count[ref - 1] > (mb_xy >> 5)) {
1833 int x_off = mb_x << 4, y_off = mb_y << 4;
1834 int mx = (mb->mv.x >> 2) + x_off + 8;
1835 int my = (mb->mv.y >> 2) + y_off;
1837 int off = mx + (my + (mb_x & 3) * 4) * s->linesize + 64;
1842 off = (mx >> 1) + ((my >> 1) + (mb_x & 7)) * s->uvlinesize + 64;
1854 int x_off = mb_x << 4, y_off = mb_y << 4;
1862 0, 0, 16, 16, width, height, &mb->mv);
1869 for (y = 0; y < 4; y++) {
1870 for (x = 0; x < 4; x++) {
1872 ref, &bmv[4 * y + x],
1873 4 * x + x_off, 4 * y + y_off, 4, 4,
1884 for (y = 0; y < 2; y++) {
1885 for (x = 0; x < 2; x++) {
1886 uvmv.x = mb->bmv[2 * y * 4 + 2 * x ].x +
1887 mb->bmv[2 * y * 4 + 2 * x + 1].x +
1888 mb->bmv[(2 * y + 1) * 4 + 2 * x ].x +
1889 mb->bmv[(2 * y + 1) * 4 + 2 * x + 1].x;
1890 uvmv.y = mb->bmv[2 * y * 4 + 2 * x ].y +
1891 mb->bmv[2 * y * 4 + 2 * x + 1].y +
1892 mb->bmv[(2 * y + 1) * 4 + 2 * x ].y +
1893 mb->bmv[(2 * y + 1) * 4 + 2 * x + 1].y;
1902 &uvmv, 4 * x + x_off, 4 * y + y_off, 4, 4,
1911 0, 0, 16, 8, width, height, &bmv[0]);
1913 0, 8, 16, 8, width, height, &bmv[1]);
1917 0, 0, 8, 16, width, height, &bmv[0]);
1919 8, 0, 8, 16, width, height, &bmv[1]);
1923 0, 0, 8, 8, width, height, &bmv[0]);
1925 8, 0, 8, 8, width, height, &bmv[1]);
1927 0, 8, 8, 8, width, height, &bmv[2]);
1929 8, 8, 8, 8, width, height, &bmv[3]);
1941 for (y = 0; y < 4; y++) {
1944 if (nnz4 & ~0x01010101) {
1945 for (x = 0; x < 4; x++) {
1966 for (ch = 0; ch < 2; ch++) {
1969 uint8_t *ch_dst = dst[1 + ch];
1970 if (nnz4 & ~0x01010101) {
1971 for (y = 0; y < 2; y++) {
1972 for (x = 0; x < 2; x++) {
1975 td->block[4 + ch][(y << 1) + x],
1979 td->block[4 + ch][(y << 1) + x],
1983 goto chroma_idct_end;
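In the idct fragments above, nnz4 packs four per-block coefficient counts into one 32-bit word, one byte each; the test `nnz4 & ~0x01010101` (lines 1944 and 1970) is non-zero exactly when at least one of the four blocks has a count of 2 or more, i.e. coefficients beyond the DC term, so the per-block transform path is taken instead of the grouped DC-only add. A one-line restatement:

#include <stdint.h>

/* Non-zero if any packed per-block count exceeds 1 (counts fit in a byte). */
static int group_has_non_dc_coeffs(uint32_t nnz4)
{
    return (nnz4 & ~0x01010101u) != 0;
}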
2000 int interior_limit, filter_level;
2014 filter_level = av_clip_uintp2(filter_level, 6);
2016 interior_limit = filter_level;
2021 interior_limit = FFMAX(interior_limit, 1);
2031 int mb_x, int mb_y, int is_vp7)
2033 int mbedge_lim, bedge_lim_y, bedge_lim_uv, hev_thresh;
2039 static const uint8_t hev_thresh_lut[2][64] = {
2040 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
2041 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2042 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
2044 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
2045 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2046 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2054 bedge_lim_y = filter_level;
2055 bedge_lim_uv = filter_level * 2;
2056 mbedge_lim = filter_level + 2;
2059 bedge_lim_uv = filter_level * 2 + inner_limit;
2060 mbedge_lim = bedge_lim_y + 4;
2063 hev_thresh = hev_thresh_lut[s->keyframe][filter_level];
2067 mbedge_lim, inner_limit, hev_thresh);
2069 mbedge_lim, inner_limit, hev_thresh);
2072 #define H_LOOP_FILTER_16Y_INNER(cond) \
2073 if (cond && inner_filter) { \
2074 s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 4, linesize, \
2075 bedge_lim_y, inner_limit, \
2077 s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 8, linesize, \
2078 bedge_lim_y, inner_limit, \
2080 s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 12, linesize, \
2081 bedge_lim_y, inner_limit, \
2083 s->vp8dsp.vp8_h_loop_filter8uv_inner(dst[1] + 4, dst[2] + 4, \
2084 uvlinesize, bedge_lim_uv, \
2085 inner_limit, hev_thresh); \
2092 mbedge_lim, inner_limit, hev_thresh);
2094 mbedge_lim, inner_limit, hev_thresh);
2099 linesize, bedge_lim_y,
2100 inner_limit, hev_thresh);
2102 linesize, bedge_lim_y,
2103 inner_limit, hev_thresh);
2105 linesize, bedge_lim_y,
2106 inner_limit, hev_thresh);
2108 dst[2] + 4 * uvlinesize,
2109 uvlinesize, bedge_lim_uv,
2110 inner_limit, hev_thresh);
2120 int mbedge_lim, bedge_lim;
2129 bedge_lim = 2 * filter_level + inner_limit;
2130 mbedge_lim = bedge_lim + 4;
2149 #define MARGIN (16 << 2)
2159 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
2161 ((s->mb_width + 1) * (mb_y + 1) + 1);
2168 for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
2173 prev_frame && prev_frame->seg_map ?
2196 #define check_thread_pos(td, otd, mb_x_check, mb_y_check) \
2198 int tmp = (mb_y_check << 16) | (mb_x_check & 0xFFFF); \
2199 if (otd->thread_mb_pos < tmp) { \
2200 pthread_mutex_lock(&otd->lock); \
2201 td->wait_mb_pos = tmp; \
2203 if (otd->thread_mb_pos >= tmp) \
2205 pthread_cond_wait(&otd->cond, &otd->lock); \
2207 td->wait_mb_pos = INT_MAX; \
2208 pthread_mutex_unlock(&otd->lock); \
2212 #define update_pos(td, mb_y, mb_x) \
2214 int pos = (mb_y << 16) | (mb_x & 0xFFFF); \
2215 int sliced_threading = (avctx->active_thread_type == FF_THREAD_SLICE) && \
2217 int is_null = !next_td || !prev_td; \
2218 int pos_check = (is_null) ? 1 \
2219 : (next_td != td && \
2220 pos >= next_td->wait_mb_pos) || \
2222 pos >= prev_td->wait_mb_pos); \
2223 td->thread_mb_pos = pos; \
2224 if (sliced_threading && pos_check) { \
2225 pthread_mutex_lock(&td->lock); \
2226 pthread_cond_broadcast(&td->cond); \
2227 pthread_mutex_unlock(&td->lock); \
2231 #define check_thread_pos(td, otd, mb_x_check, mb_y_check)
2232 #define update_pos(td, mb_y, mb_x)
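check_thread_pos() and update_pos() coordinate sliced decoding by packing a macroblock position into one int, mb_y in the high 16 bits and mb_x in the low 16; an ordinary integer comparison then orders positions row-major, so one slice thread can wait on a condition variable until another thread has advanced past a given (x, y). The packing, stated on its own:

/* Row-major position key used by the threading macros above. */
static int pack_mb_pos(int mb_x, int mb_y)
{
    return (mb_y << 16) | (mb_x & 0xFFFF);
}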
2236 int jobnr, int threadnr, int is_vp7)
2241 int mb_x, mb_xy = mb_y * s->mb_width;
2254 prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
2258 next_td = &s->thread_data[(jobnr + 1) % num_jobs];
2268 memset(mb - 1, 0, sizeof(*mb));
2272 if (!is_vp7 || mb_y == 0)
2278 for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
2280 if (prev_td != td) {
2281 if (threadnr != 0) {
2283 mb_x + (is_vp7 ? 2 : 1),
2284 mb_y - (is_vp7 ? 2 : 1));
2287 mb_x + (is_vp7 ? 2 : 1) + s->mb_width + 3,
2288 mb_y - (is_vp7 ? 2 : 1));
2295 dst[2] - dst[1], 2);
2299 prev_frame && prev_frame->seg_map ?
2300 prev_frame->seg_map->data + mb_xy : NULL, 0, is_vp7);
2331 if (s->deblock_filter && num_jobs != 1 && threadnr == num_jobs - 1) {
2357 int jobnr, int threadnr, int is_vp7)
2379 prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
2383 next_td = &s->thread_data[(jobnr + 1) % num_jobs];
2385 for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb++) {
2389 (mb_x + 1) + (s->mb_width + 3), mb_y - 1);
2394 if (num_jobs == 1) {
2406 filter_mb(s, dst, f, mb_x, mb_y, is_vp7);
2417 int threadnr, int is_vp7)
2426 for (mb_y = jobnr; mb_y < s->mb_height; mb_y += num_jobs) {
2446 int jobnr, int threadnr)
2452 int jobnr, int threadnr)
2463 int ret, i, referenced, num_jobs;
2494 for (i = 0; i < 5; i++)
2496 &s->frames[i] != prev_frame &&
2512 "Discarding interframe without a prior keyframe!\n");
2517 curframe->tf.f->key_frame = s->keyframe;
2545 s->linesize = curframe->tf.f->linesize[0];
2618 #if CONFIG_VP7_DECODER
2681 #if CONFIG_VP7_DECODER
2693 #if CONFIG_VP8_DECODER
2709 #define REBASE(pic) pic ? pic - &s_src->frames[0] + &s->frames[0] : NULL
2724 s->prob[0] = s_src->prob[!s_src->update_probabilities];
2730 if (s_src->frames[i].tf.f->data[0]) {
2731 int ret = vp8_ref_frame(s, &s->frames[i], &s_src->frames[i]);
2737 s->framep[0] = REBASE(s_src->next_framep[0]);
2738 s->framep[1] = REBASE(s_src->next_framep[1]);
2739 s->framep[2] = REBASE(s_src->next_framep[2]);
2740 s->framep[3] = REBASE(s_src->next_framep[3]);
2746 #if CONFIG_VP7_DECODER
2753 .init = vp7_decode_init,
2755 .decode = vp7_decode_frame,
2761 #if CONFIG_VP8_DECODER
static void get_quants(VP8Context *s)
VP8Macroblock * macroblocks
static const uint8_t vp8_dc_qlookup[VP8_MAX_QUANT+1]
static av_always_inline void intra_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb, int mb_x, int mb_y, int is_vp7)
static const uint8_t vp8_submv_prob[5][3]
static const uint16_t vp7_ydc_qlookup[]
discard all frames except keyframes
void(* prefetch)(uint8_t *buf, ptrdiff_t stride, int h)
Prefetch memory into cache (if supported by hardware).
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static const uint8_t vp7_mv_default_prob[2][17]
(only used in prediction) no split MVs
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
void ff_vp7dsp_init(VP8DSPContext *c)
static void update_lf_deltas(VP8Context *s)
This structure describes decoded (raw) audio or video data.
struct VP8Context::@66 segmentation
Base parameters for segmentation, i.e.
static const uint8_t vp7_pred4x4_mode[]
int8_t sign_bias[4]
one state [0, 1] per ref frame type
static av_unused void pthread_cond_init(pthread_cond_t *cond, const void *unused_attr)
int coded_width
Bitstream width / height, may be different from width/height e.g.
static av_always_inline int inter_predict_dc(int16_t block[16], int16_t pred[2])
#define AV_LOG_WARNING
Something somehow does not look correct.
#define VP7_MV_PRED_COUNT
static av_always_inline int vp8_rac_get_tree(VP56RangeCoder *c, const int8_t(*tree)[2], const uint8_t *probs)
uint8_t feature_value[4][4]
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
uint8_t * intra4x4_pred_mode_top
static VP56Frame ref_to_update(VP8Context *s, int update, VP56Frame ref)
Determine which buffers golden and altref should be updated with after this frame.
void(* vp8_v_loop_filter16y)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
static int vp7_decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, uint8_t *token_prob, int16_t qmul[2], const uint8_t scan[16])
uint8_t token[4][16][3][NUM_DCT_TOKENS-1]
static void vp8_decode_flush(AVCodecContext *avctx)
vp8_mc_func put_vp8_bilinear_pixels_tab[3][3][3]
#define DECLARE_ALIGNED(n, t, v)
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
#define FF_ARRAY_ELEMS(a)
static const int8_t vp8_pred8x8c_tree[3][2]
static const uint16_t vp7_y2dc_qlookup[]
struct VP8Context::@70 prob[2]
These are all of the updatable probabilities for binary decisions.
static void copy_luma(AVFrame *dst, AVFrame *src, int width, int height)
void ff_thread_await_progress(ThreadFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
int update_probabilities
If this flag is not set, all the probability updates are discarded after this frame is decoded...
static void vp8_filter_mb_row(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr, int is_vp7)
static int vp8_decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, uint8_t *token_prob, int16_t qmul[2])
static const uint8_t zigzag_scan[16]
vp8_mc_func put_vp8_epel_pixels_tab[3][3][3]
first dimension: width>>3, height is assumed equal to width; second dimension: 0 if no vertical interp...
static av_always_inline const uint8_t * get_submv_prob(uint32_t left, uint32_t top, int is_vp7)
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
static const uint8_t vp8_pred8x8c_prob_inter[3]
static av_always_inline int decode_block_coeffs(VP56RangeCoder *c, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, int zero_nhood, int16_t qmul[2], const uint8_t scan[16], int vp7)
static const uint8_t vp8_mbsplits[5][16]
enum AVDiscard skip_frame
static const int8_t vp8_pred16x16_tree_intra[4][2]
#define av_assert0(cond)
assert() equivalent, that is always enabled.
static int read_mv_component(VP56RangeCoder *c, const uint8_t *p, int vp7)
Motion vector coding, 17.1.
static int decode(MimicContext *ctx, int quality, int num_coeffs, int is_iframe)
void avpriv_request_sample(void *avc, const char *msg, ...) av_printf_format(2, 3)
Log a generic warning message about a missing feature.
int update_golden
VP56_FRAME_NONE if not updated, or which frame to copy if so.
uint8_t intra4x4_pred_mode_top[4]
static av_always_inline void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int mb_x, int mb_y, int mb_width, int simple, int xchg)
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
static int vp7_update_dimensions(VP8Context *s, int width, int height)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
int fade_present
Fade bit present in bitstream (VP7)
static av_always_inline void vp7_decode_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int layout)
static VP8Frame * vp8_find_free_buffer(VP8Context *s)
static av_always_inline int check_intra_pred4x4_mode_emuedge(int mode, int mb_x, int mb_y, int *copy_buf, int vp7)
Multithreading support functions.
int ff_vp8_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
static const uint8_t vp8_mv_update_prob[2][19]
#define CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
void(* pred8x8[4+3+4])(uint8_t *src, ptrdiff_t stride)
int update_last
update VP56_FRAME_PREVIOUS with the current one
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
static void copy(LZOContext *c, int cnt)
Copies bytes from input to output buffer with checking.
static void parse_segment_info(VP8Context *s)
int num_coeff_partitions
All coefficients are contained in separate arith coding contexts.
static const uint8_t vp8_token_default_probs[4][8][3][NUM_DCT_TOKENS-1]
vp8_mc_func put_pixels_tab[3][3][3]
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to st...
void(* pred4x4[9+3+3])(uint8_t *src, const uint8_t *topright, ptrdiff_t stride)
uint8_t feature_index_prob[4][3]
uint8_t intra4x4_pred_mode_mb[16]
static av_always_inline int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt, int is_vp7)
uint8_t intra4x4_pred_mode_left[4]
#define VERT_VP8_PRED
for VP8, VERT_PRED is the average of
av_cold void ff_vp78dsp_init(VP8DSPContext *dsp)
static const VP56mv * get_bmv_ptr(const VP8Macroblock *mb, int subblock)
static const uint8_t vp8_mbsplit_count[4]
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static const int8_t vp8_coeff_band_indexes[8][10]
static const uint8_t vp8_pred4x4_mode[]
static av_always_inline void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int mb_xy, int ref)
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() for frame-multithreaded codecs.
void(* vp8_luma_dc_wht_dc)(int16_t block[4][4][16], int16_t dc[16])
static const uint8_t vp8_dct_cat2_prob[]
static const uint8_t vp8_mv_default_prob[2][19]
static const int sizes[][2]
void(* vp8_h_loop_filter8uv)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
static av_always_inline int check_tm_pred8x8_mode(int mode, int mb_x, int mb_y, int vp7)
static int pthread_mutex_init(pthread_mutex_t *m, void *attr)
static int vp8_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
int active_thread_type
Which multithreading methods are in use by the codec.
VP8 compatible video decoder.
void(* vp8_v_loop_filter8uv)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
static const uint8_t vp8_mbfirstidx[4][16]
#define EDGE_EMU_LINESIZE
simple assert() macros that are a bit more flexible than ISO C assert().
void av_log(void *avcl, int level, const char *fmt,...)
uint16_t inter_dc_pred[2][2]
Interframe DC prediction (VP7): [0] VP56_FRAME_PREVIOUS, [1] VP56_FRAME_GOLDEN.
const char * name
Name of the codec implementation.
VP8Macroblock * macroblocks_base
static av_always_inline void vp8_mc_part(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], ThreadFrame *ref_frame, int x_off, int y_off, int bx_off, int by_off, int block_w, int block_h, int width, int height, VP56mv *mv)
static const uint8_t vp8_pred4x4_prob_inter[9]
uint8_t edge_emu_buffer[21 *EDGE_EMU_LINESIZE]
struct VP8Context::@69 lf_delta
static av_always_inline int decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, uint8_t *token_prob, int16_t qmul[2], const uint8_t scan[16], int vp7)
static const int vp7_mode_contexts[31][4]
static void vp7_get_quants(VP8Context *s)
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
static const uint8_t vp8_pred16x16_prob_inter[4]
useful rectangle filling function
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
static int pthread_mutex_destroy(pthread_mutex_t *m)
static av_unused void pthread_cond_destroy(pthread_cond_t *cond)
#define FF_THREAD_FRAME
Decode more than one frame at once.
#define H_LOOP_FILTER_16Y_INNER(cond)
uint8_t feature_present_prob[4]
static av_always_inline void vp8_mc_chroma(VP8Context *s, VP8ThreadData *td, uint8_t *dst1, uint8_t *dst2, ThreadFrame *ref, const VP56mv *mv, int x_off, int y_off, int block_w, int block_h, int width, int height, ptrdiff_t linesize, vp8_mc_func mc_func[3][3])
chroma MC function
static av_unused int vp8_rac_get_sint(VP56RangeCoder *c, int bits)
int width
picture width / height.
int8_t ref[4]
filter strength adjustment for macroblocks that reference: [0] - intra / VP56_FRAME_CURRENT [1] - VP5...
void(* vp8_idct_dc_add4y)(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)
static av_cold int vp8_init_frames(VP8Context *s)
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
static void free_buffers(VP8Context *s)
#define check_thread_pos(td, otd, mb_x_check, mb_y_check)
static av_unused int vp8_rac_get_uint(VP56RangeCoder *c, int bits)
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
void(* vp8_mc_func)(uint8_t *dst, ptrdiff_t dstStride, uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
int16_t luma_dc_qmul[2]
luma dc-only block quant
static const uint8_t vp8_pred4x4_prob_intra[10][10][9]
uint8_t(* top_border)[16+8+8]
static av_always_inline void filter_level_for_mb(VP8Context *s, VP8Macroblock *mb, VP8FilterStrength *f, int is_vp7)
static const int8_t vp7_feature_index_tree[4][2]
static const uint8_t vp7_feature_value_size[2][4]
#define vp56_rac_get_prob
static void vp8_decode_flush_impl(AVCodecContext *avctx, int free_mem)
static av_always_inline void decode_mb_coeffs(VP8Context *s, VP8ThreadData *td, VP56RangeCoder *c, VP8Macroblock *mb, uint8_t t_nnz[9], uint8_t l_nnz[9], int is_vp7)
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, const int chroma_format_idc)
Set the intra prediction function pointers.
static void vp8_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame, VP8Frame *prev_frame)
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
static const float pred[4]
static int vp7_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
static const int8_t mv[256][2]
static void vp7_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame, VP8Frame *prev_frame)
static av_always_inline int check_intra_pred8x8_mode_emuedge(int mode, int mb_x, int mb_y, int vp7)
static av_always_inline int vp56_rac_get_prob_branchy(VP56RangeCoder *c, int prob)
void(* vp8_v_loop_filter8uv_inner)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
void(* vp8_h_loop_filter_simple)(uint8_t *dst, ptrdiff_t stride, int flim)
static av_always_inline void inter_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb, int mb_x, int mb_y)
Apply motion vectors to prediction buffer, chapter 18.
void(* vp8_idct_add)(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
Libavcodec external API header.
static const uint8_t vp8_pred8x8c_prob_intra[3]
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
static void vp8_release_frame(VP8Context *s, VP8Frame *f)
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
static const uint16_t vp7_yac_qlookup[]
main external API structure.
static void close(AVCodecParserContext *s)
static int vp7_fade_frame(VP8Context *s, VP56RangeCoder *c)
uint8_t * data
The data buffer.
VP8Frame * next_framep[4]
int mb_layout
This describes the macroblock memory layout.
uint8_t left_nnz[9]
For coeff decode, we need to know whether the above block had non-zero coefficients.
static const uint8_t vp8_mbsplit_prob[3]
VP56RangeCoder c
header context, includes mb modes and motion vectors
void(* pred16x16[4+3+2])(uint8_t *src, ptrdiff_t stride)
VP56RangeCoder coeff_partition[8]
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
static const int8_t vp8_pred16x16_tree_inter[4][2]
static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
static int vp8_update_dimensions(VP8Context *s, int width, int height)
struct VP8Context::@67 filter
VP8FilterStrength * filter_strength
static av_always_inline void clamp_mv(VP8Context *s, VP56mv *dst, const VP56mv *src)
void(* vp8_idct_dc_add4uv)(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)
static av_always_inline int check_dc_pred8x8_mode(int mode, int mb_x, int mb_y)
static void vp78_update_probability_tables(VP8Context *s)
static const int8_t vp8_pred4x4_tree[9][2]
uint8_t enabled
whether each mb can have a different strength based on mode/ref
static av_always_inline void idct_mb(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb)
static void vp78_update_pred16x16_pred8x8_mvc_probabilities(VP8Context *s, int mvc_size)
static const uint8_t subpel_idx[3][8]
static void update_refs(VP8Context *s)
static av_always_inline int vp8_rac_get_coeff(VP56RangeCoder *c, const uint8_t *prob)
static const uint8_t vp8_coeff_band[16]
int allocate_progress
Whether to allocate progress for frame threading.
static const uint16_t vp8_ac_qlookup[VP8_MAX_QUANT+1]
static const uint8_t vp8_pred16x16_prob_intra[4]
static const int8_t vp8_segmentid_tree[][2]
static av_always_inline void decode_intra4x4_modes(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb, int mb_x, int keyframe, int layout)
void ff_vp56_init_range_decoder(VP56RangeCoder *c, const uint8_t *buf, int buf_size)
void(* vp8_luma_dc_wht)(int16_t block[4][4][16], int16_t dc[16])
av_cold int ff_vp8_decode_init(AVCodecContext *avctx)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
uint8_t feature_enabled[4]
Macroblock features (VP7)
int8_t mode[VP8_MVMODE_SPLIT+1]
filter strength adjustment for the following macroblock modes: [0-3] - i16x16 (always zero) [4] - i4x...
2 8x16 blocks (horizontal)
av_cold int ff_vp8_decode_free(AVCodecContext *avctx)
static av_always_inline void backup_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int simple)
discard all non reference
static av_always_inline void vp78_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *curframe, VP8Frame *prev_frame, int is_vp7)
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
static av_always_inline void decode_mb_mode(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, uint8_t *segment, uint8_t *ref, int layout, int is_vp7)
void(* vp8_v_loop_filter_simple)(uint8_t *dst, ptrdiff_t stride, int flim)
#define CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
common internal api header.
#define CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
static int ref_frame(Vp3DecodeContext *s, ThreadFrame *dst, ThreadFrame *src)
static void vp8_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr, int is_vp7)
static av_cold void flush(AVCodecContext *avctx)
Flush (reset) the frame ID after seeking.
static int vp8_alloc_frame(VP8Context *s, VP8Frame *f, int ref)
struct VP8Context::@68 qmat[4]
Macroblocks can have one of 4 different quants in a frame when segmentation is enabled.
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
enum AVDiscard skip_loop_filter
static av_always_inline int vp8_rac_get(VP56RangeCoder *c)
static av_always_inline int decode_splitmvs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb, int layout, int is_vp7)
Split motion vector prediction, 16.4.
static av_cold int init(AVCodecParserContext *s)
static const SiprModeParam modes[MODE_COUNT]
static av_always_inline int check_tm_pred4x4_mode(int mode, int mb_x, int mb_y, int vp7)
void(* vp8_h_loop_filter16y)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
static int vp7_calculate_mb_offset(int mb_x, int mb_y, int mb_width, int xoffset, int yoffset, int boundary, int *edge_x, int *edge_y)
The vp7 reference decoder uses a padding macroblock column (added to right edge of the frame) to guar...
int(* execute2)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count)
The codec may call this to execute several independent things.
#define update_pos(td, mb_y, mb_x)
struct AVCodecInternal * internal
Private context used for internal data.
#define HOR_VP8_PRED
unaveraged version of HOR_PRED, see
static av_always_inline int update_dimensions(VP8Context *s, int width, int height, int is_vp7)
static av_always_inline int vp78_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr, int is_vp7)
void(* vp8_idct_dc_add)(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
static av_unused int vp8_rac_get_nn(VP56RangeCoder *c)
void(* vp8_v_loop_filter16y_inner)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
static void fade(uint8_t *dst, uint8_t *src, int width, int height, int linesize, int alpha, int beta)
static av_always_inline void vp8_mc_luma(VP8Context *s, VP8ThreadData *td, uint8_t *dst, ThreadFrame *ref, const VP56mv *mv, int x_off, int y_off, int block_w, int block_h, int width, int height, ptrdiff_t linesize, vp8_mc_func mc_func[3][3])
luma MC function
static const uint8_t vp8_token_update_probs[4][8][3][NUM_DCT_TOKENS-1]
static av_always_inline void filter_mb(VP8Context *s, uint8_t *dst[3], VP8FilterStrength *f, int mb_x, int mb_y, int is_vp7)
int8_t filter_level[4]
base loop filter level
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
static const int vp8_mode_contexts[6][4]
static const uint8_t vp8_dct_cat1_prob[]
#define FFSWAP(type, a, b)
static av_always_inline void vp8_decode_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int layout)
uint8_t non_zero_count_cache[6][4]
This is the index plus one of the last non-zero coeff for each of the blocks in the current macrobloc...
void ff_vp8dsp_init(VP8DSPContext *c)
static void vp78_reset_probability_tables(VP8Context *s)
This structure stores compressed data.
static int vp7_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
void(* emulated_edge_mc)(uint8_t *buf, const uint8_t *src, ptrdiff_t buf_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
const uint8_t *const ff_vp8_dct_cat_prob[]
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
VP8ThreadData * thread_data
static av_always_inline void filter_mb_simple(VP8Context *s, uint8_t *dst, VP8FilterStrength *f, int mb_x, int mb_y)
static const VP7MVPred vp7_mv_pred[VP7_MV_PRED_COUNT]
static const uint16_t vp7_y2ac_qlookup[]
static const uint8_t vp7_submv_prob[3]
static av_always_inline int vp78_decode_init(AVCodecContext *avctx, int is_vp7)