/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "decodemv.h"
#include "treereader.h"
#include "vp8/common/entropymv.h"
#include "vp8/common/entropymode.h"
#include "onyxd_int.h"
#include "vp8/common/findnearmv.h"

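/* Helpers that read one tree-coded prediction mode from the boolean decoder
 * using the given probability table and cast the result to the matching
 * mode enum.
 */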
static B_PREDICTION_MODE read_bmode(vp8_reader *bc, const vp8_prob *p) {
  const int i = vp8_treed_read(bc, vp8_bmode_tree, p);

  return (B_PREDICTION_MODE)i;
}

static MB_PREDICTION_MODE read_ymode(vp8_reader *bc, const vp8_prob *p) {
  const int i = vp8_treed_read(bc, vp8_ymode_tree, p);

  return (MB_PREDICTION_MODE)i;
}

static MB_PREDICTION_MODE read_kf_ymode(vp8_reader *bc, const vp8_prob *p) {
  const int i = vp8_treed_read(bc, vp8_kf_ymode_tree, p);

  return (MB_PREDICTION_MODE)i;
}

static MB_PREDICTION_MODE read_uv_mode(vp8_reader *bc, const vp8_prob *p) {
  const int i = vp8_treed_read(bc, vp8_uv_mode_tree, p);

  return (MB_PREDICTION_MODE)i;
}

static void read_kf_modes(VP8D_COMP *pbi, MODE_INFO *mi) {
  vp8_reader *const bc = &pbi->mbc[8];
  const int mis = pbi->common.mode_info_stride;

  mi->mbmi.ref_frame = INTRA_FRAME;
  mi->mbmi.mode = read_kf_ymode(bc, vp8_kf_ymode_prob);

  if (mi->mbmi.mode == B_PRED) {
    int i = 0;
    mi->mbmi.is_4x4 = 1;

    do {
      const B_PREDICTION_MODE A = above_block_mode(mi, i, mis);
      const B_PREDICTION_MODE L = left_block_mode(mi, i);

      mi->bmi[i].as_mode = read_bmode(bc, vp8_kf_bmode_prob[A][L]);
    } while (++i < 16);
  }

  mi->mbmi.uv_mode = read_uv_mode(bc, vp8_kf_uv_mode_prob);
}

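/* Read one motion vector component. Small magnitudes (0-7) are coded with
 * the short-MV tree; larger magnitudes are coded bit by bit: bits 0-2 first,
 * then bits 9 down to 4, and finally bit 3, which is only read when some
 * higher bit is set (otherwise it is implied to be 1, since a smaller value
 * would have used the short form). A sign bit follows any non-zero magnitude.
 */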
static int read_mvcomponent(vp8_reader *r, const MV_CONTEXT *mvc) {
  const vp8_prob *const p = (const vp8_prob *)mvc;
  int x = 0;

  if (vp8_read(r, p[mvpis_short])) { /* Large */
    int i = 0;

    do {
      x += vp8_read(r, p[MVPbits + i]) << i;
    } while (++i < 3);

    i = mvlong_width - 1; /* Skip bit 3, which is sometimes implicit */

    do {
      x += vp8_read(r, p[MVPbits + i]) << i;
    } while (--i > 3);

    if (!(x & 0xFFF0) || vp8_read(r, p[MVPbits + 3])) x += 8;
  } else { /* small */
    x = vp8_treed_read(r, vp8_small_mvtree, p + MVPshort);
  }

  if (x && vp8_read(r, p[MVPsign])) x = -x;

  return x;
}

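/* Read a full motion vector, row then column. Components are coded at
 * quarter-pel precision in the bitstream; the *2 scales them to the 1/8-pel
 * units used internally (the same units as the edge distances below).
 */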
static void read_mv(vp8_reader *r, MV *mv, const MV_CONTEXT *mvc) {
  mv->row = (short)(read_mvcomponent(r, mvc) * 2);
  mv->col = (short)(read_mvcomponent(r, ++mvc) * 2);
}

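/* Per-frame update of the MV entropy contexts: for each of the two MV
 * components, every probability is optionally replaced by a 7-bit literal,
 * stored doubled (with 0 mapped to 1 so the probability stays non-zero).
 */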
static void read_mvcontexts(vp8_reader *bc, MV_CONTEXT *mvc) {
  int i = 0;

  do {
    const vp8_prob *up = vp8_mv_update_probs[i].prob;
    vp8_prob *p = (vp8_prob *)(mvc + i);
    vp8_prob *const pstop = p + MVPcount;

    do {
      if (vp8_read(bc, *up++)) {
        const vp8_prob x = (vp8_prob)vp8_read_literal(bc, 7);

        *p = x ? x << 1 : 1;
      }
    } while (++p < pstop);
  } while (++i < 2);
}

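/* For each split configuration (16x8, 8x16, 8x8, 4x4): mbsplit_fill_count
 * gives how many 4x4 blocks each partition covers, and mbsplit_fill_offset
 * lists the 4x4 block indices grouped by partition. These are used to copy
 * a partition's MV to every block it covers.
 */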
static const unsigned char mbsplit_fill_count[4] = { 8, 8, 4, 1 };
static const unsigned char mbsplit_fill_offset[4][16] = {
  { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
  { 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15 },
  { 0, 1, 4, 5, 2, 3, 6, 7, 8, 9, 12, 13, 10, 11, 14, 15 },
  { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }
};

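/* Frame-level setup for mode/MV decoding: read the mb_no_coeff_skip flag and
 * its probability, and, on inter frames, the intra/last/golden-vs-altref
 * reference probabilities, optional ymode and uv_mode probability updates,
 * and the MV context updates.
 */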
static void mb_mode_mv_init(VP8D_COMP *pbi) {
  vp8_reader *const bc = &pbi->mbc[8];
  MV_CONTEXT *const mvc = pbi->common.fc.mvc;

#if CONFIG_ERROR_CONCEALMENT
  /* Default is that no macroblock is corrupt, therefore we initialize
   * mvs_corrupt_from_mb to something very big, which we can be sure is
   * outside the frame. */
  pbi->mvs_corrupt_from_mb = UINT_MAX;
#endif
  /* Read the mb_no_coeff_skip flag */
  pbi->common.mb_no_coeff_skip = (int)vp8_read_bit(bc);

  pbi->prob_skip_false = 0;
  if (pbi->common.mb_no_coeff_skip) {
    pbi->prob_skip_false = (vp8_prob)vp8_read_literal(bc, 8);
  }

  if (pbi->common.frame_type != KEY_FRAME) {
    pbi->prob_intra = (vp8_prob)vp8_read_literal(bc, 8);
    pbi->prob_last = (vp8_prob)vp8_read_literal(bc, 8);
    pbi->prob_gf = (vp8_prob)vp8_read_literal(bc, 8);

    if (vp8_read_bit(bc)) {
      int i = 0;

      do {
        pbi->common.fc.ymode_prob[i] = (vp8_prob)vp8_read_literal(bc, 8);
      } while (++i < 4);
    }

    if (vp8_read_bit(bc)) {
      int i = 0;

      do {
        pbi->common.fc.uv_mode_prob[i] = (vp8_prob)vp8_read_literal(bc, 8);
      } while (++i < 3);
    }

    read_mvcontexts(bc, mvc);
  }
}

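/* Probabilities for the sub-MV reference tree, indexed by a 3-bit context
 * built from the left and above sub-block MVs:
 *   (above == 0) << 2 | (left == 0) << 1 | (left == above).
 * Several rows repeat because different bit patterns collapse to the same
 * effective context (see the per-row labels).
 */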
const vp8_prob vp8_sub_mv_ref_prob3[8][VP8_SUBMVREFS - 1] = {
  { 147, 136, 18 }, /* SUBMVREF_NORMAL */
  { 223, 1, 34 },   /* SUBMVREF_LEFT_ABOVE_SAME */
  { 106, 145, 1 },  /* SUBMVREF_LEFT_ZED */
  { 208, 1, 1 },    /* SUBMVREF_LEFT_ABOVE_ZED */
  { 179, 121, 1 },  /* SUBMVREF_ABOVE_ZED */
  { 223, 1, 34 },   /* SUBMVREF_LEFT_ABOVE_SAME */
  { 179, 121, 1 },  /* SUBMVREF_ABOVE_ZED */
  { 208, 1, 1 }     /* SUBMVREF_LEFT_ABOVE_ZED */
};

static const vp8_prob *get_sub_mv_ref_prob(const uint32_t left,
                                            const uint32_t above) {
  int lez = (left == 0);
  int aez = (above == 0);
  int lea = (left == above);
  const vp8_prob *prob;

  prob = vp8_sub_mv_ref_prob3[(aez << 2) | (lez << 1) | (lea)];

  return prob;
}

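/* Decode a SPLITMV macroblock: choose the split configuration, then for each
 * partition pick the left/above neighbour MVs as context, read the sub-MV
 * mode (reuse left, reuse above, zero, or a new MV coded as a delta from
 * best_mv), and copy the result to every 4x4 block the partition covers.
 */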
static void decode_split_mv(vp8_reader *const bc, MODE_INFO *mi,
                            const MODE_INFO *left_mb, const MODE_INFO *above_mb,
                            MB_MODE_INFO *mbmi, int_mv best_mv,
                            MV_CONTEXT *const mvc, int mb_to_left_edge,
                            int mb_to_right_edge, int mb_to_top_edge,
                            int mb_to_bottom_edge) {
  int s; /* split configuration (16x8, 8x16, 8x8, 4x4) */
  /* number of partitions in the split configuration (see vp8_mbsplit_count) */
  int num_p;
  int j = 0;

  s = 3;
  num_p = 16;
  if (vp8_read(bc, 110)) {
    s = 2;
    num_p = 4;
    if (vp8_read(bc, 111)) {
      s = vp8_read(bc, 150);
      num_p = 2;
    }
  }

  do /* for each subset j */
  {
    int_mv leftmv, abovemv;
    int_mv blockmv;
    int k; /* first block in subset j */

    const vp8_prob *prob;
    k = vp8_mbsplit_offset[s][j];

    if (!(k & 3)) {
      /* On L edge, get from MB to left of us */
      if (left_mb->mbmi.mode != SPLITMV) {
        leftmv.as_int = left_mb->mbmi.mv.as_int;
      } else {
        leftmv.as_int = (left_mb->bmi + k + 4 - 1)->mv.as_int;
      }
    } else {
      leftmv.as_int = (mi->bmi + k - 1)->mv.as_int;
    }

    if (!(k >> 2)) {
      /* On top edge, get from MB above us */
      if (above_mb->mbmi.mode != SPLITMV) {
        abovemv.as_int = above_mb->mbmi.mv.as_int;
      } else {
        abovemv.as_int = (above_mb->bmi + k + 16 - 4)->mv.as_int;
      }
    } else {
      abovemv.as_int = (mi->bmi + k - 4)->mv.as_int;
    }

    prob = get_sub_mv_ref_prob(leftmv.as_int, abovemv.as_int);

    if (vp8_read(bc, prob[0])) {
      if (vp8_read(bc, prob[1])) {
        blockmv.as_int = 0;
        if (vp8_read(bc, prob[2])) {
          blockmv.as_mv.row = read_mvcomponent(bc, &mvc[0]) * 2;
          blockmv.as_mv.row += best_mv.as_mv.row;
          blockmv.as_mv.col = read_mvcomponent(bc, &mvc[1]) * 2;
          blockmv.as_mv.col += best_mv.as_mv.col;
        }
      } else {
        blockmv.as_int = abovemv.as_int;
      }
    } else {
      blockmv.as_int = leftmv.as_int;
    }

    mbmi->need_to_clamp_mvs |=
        vp8_check_mv_bounds(&blockmv, mb_to_left_edge, mb_to_right_edge,
                            mb_to_top_edge, mb_to_bottom_edge);

    {
      /* Fill (uniform) modes, mvs of jth subset.
         Must do it here because ensuing subsets can
         refer back to us via "left" or "above". */
      const unsigned char *fill_offset;
      unsigned int fill_count = mbsplit_fill_count[s];

      fill_offset =
          &mbsplit_fill_offset[s][(unsigned char)j * mbsplit_fill_count[s]];

      do {
        mi->bmi[*fill_offset].mv.as_int = blockmv.as_int;
        fill_offset++;
      } while (--fill_count);
    }

  } while (++j < num_p);

  mbmi->partitioning = s;
}

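/* Decode the mode and motion vector(s) for one non-key-frame macroblock.
 * For inter MBs the above, left and above-left neighbours are surveyed to
 * build the (zero, nearest, near) MV candidates and their counts, which then
 * select the probabilities for the ZEROMV / NEARESTMV / NEARMV / NEWMV /
 * SPLITMV decision.
 */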
static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi,
                             MB_MODE_INFO *mbmi) {
  vp8_reader *const bc = &pbi->mbc[8];
  mbmi->ref_frame = (MV_REFERENCE_FRAME)vp8_read(bc, pbi->prob_intra);
  if (mbmi->ref_frame) { /* inter MB */
    enum { CNT_INTRA, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
    int cnt[4];
    int *cntx = cnt;
    int_mv near_mvs[4];
    int_mv *nmv = near_mvs;
    const int mis = pbi->mb.mode_info_stride;
    const MODE_INFO *above = mi - mis;
    const MODE_INFO *left = mi - 1;
    const MODE_INFO *aboveleft = above - 1;
    int *ref_frame_sign_bias = pbi->common.ref_frame_sign_bias;

    mbmi->need_to_clamp_mvs = 0;

    if (vp8_read(bc, pbi->prob_last)) {
      mbmi->ref_frame =
          (MV_REFERENCE_FRAME)((int)(2 + vp8_read(bc, pbi->prob_gf)));
    }

    /* Zero accumulators */
    nmv[0].as_int = nmv[1].as_int = nmv[2].as_int = 0;
    cnt[0] = cnt[1] = cnt[2] = cnt[3] = 0;

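    /* nmv always points at the most recent distinct candidate MV and cntx at
     * its count; both start at slot 0 (CNT_INTRA), which ends up holding the
     * weight of inter neighbours whose MV is zero.
     */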
    /* Process above */
    if (above->mbmi.ref_frame != INTRA_FRAME) {
      if (above->mbmi.mv.as_int) {
        (++nmv)->as_int = above->mbmi.mv.as_int;
        mv_bias(ref_frame_sign_bias[above->mbmi.ref_frame], mbmi->ref_frame,
                nmv, ref_frame_sign_bias);
        ++cntx;
      }

      *cntx += 2;
    }

    /* Process left */
    if (left->mbmi.ref_frame != INTRA_FRAME) {
      if (left->mbmi.mv.as_int) {
        int_mv this_mv;

        this_mv.as_int = left->mbmi.mv.as_int;
        mv_bias(ref_frame_sign_bias[left->mbmi.ref_frame], mbmi->ref_frame,
                &this_mv, ref_frame_sign_bias);

        if (this_mv.as_int != nmv->as_int) {
          (++nmv)->as_int = this_mv.as_int;
          ++cntx;
        }

        *cntx += 2;
      } else {
        cnt[CNT_INTRA] += 2;
      }
    }

    /* Process above left */
    if (aboveleft->mbmi.ref_frame != INTRA_FRAME) {
      if (aboveleft->mbmi.mv.as_int) {
        int_mv this_mv;

        this_mv.as_int = aboveleft->mbmi.mv.as_int;
        mv_bias(ref_frame_sign_bias[aboveleft->mbmi.ref_frame], mbmi->ref_frame,
                &this_mv, ref_frame_sign_bias);

        if (this_mv.as_int != nmv->as_int) {
          (++nmv)->as_int = this_mv.as_int;
          ++cntx;
        }

        *cntx += 1;
      } else {
        cnt[CNT_INTRA] += 1;
      }
    }

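    /* The counts now condition the mode decision: vp8_mode_contexts[cnt][k]
     * gives the probability for the k-th branch (not-ZEROMV, not-NEARESTMV,
     * not-NEARMV, then SPLITMV vs NEWMV).
     */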
    if (vp8_read(bc, vp8_mode_contexts[cnt[CNT_INTRA]][0])) {
      /* If we have three distinct MV's ... */
      /* See if above-left MV can be merged with NEAREST */
      cnt[CNT_NEAREST] += ((cnt[CNT_SPLITMV] > 0) &
                           (nmv->as_int == near_mvs[CNT_NEAREST].as_int));

      /* Swap near and nearest if necessary */
      if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
        int tmp;
        tmp = cnt[CNT_NEAREST];
        cnt[CNT_NEAREST] = cnt[CNT_NEAR];
        cnt[CNT_NEAR] = tmp;
        tmp = (int)near_mvs[CNT_NEAREST].as_int;
        near_mvs[CNT_NEAREST].as_int = near_mvs[CNT_NEAR].as_int;
        near_mvs[CNT_NEAR].as_int = (uint32_t)tmp;
      }

      if (vp8_read(bc, vp8_mode_contexts[cnt[CNT_NEAREST]][1])) {
        if (vp8_read(bc, vp8_mode_contexts[cnt[CNT_NEAR]][2])) {
          int mb_to_top_edge;
          int mb_to_bottom_edge;
          int mb_to_left_edge;
          int mb_to_right_edge;
          MV_CONTEXT *const mvc = pbi->common.fc.mvc;
          int near_index;

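          /* Edge distances are kept in 1/8-pel units; widening them by the
           * LEFT_TOP / RIGHT_BOTTOM margins lets clamped MVs point into the
           * extended frame border.
           */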
          mb_to_top_edge = pbi->mb.mb_to_top_edge;
          mb_to_bottom_edge = pbi->mb.mb_to_bottom_edge;
          mb_to_top_edge -= LEFT_TOP_MARGIN;
          mb_to_bottom_edge += RIGHT_BOTTOM_MARGIN;
          mb_to_right_edge = pbi->mb.mb_to_right_edge;
          mb_to_right_edge += RIGHT_BOTTOM_MARGIN;
          mb_to_left_edge = pbi->mb.mb_to_left_edge;
          mb_to_left_edge -= LEFT_TOP_MARGIN;

          /* Use near_mvs[0] to store the "best" MV */
          near_index = CNT_INTRA + (cnt[CNT_NEAREST] >= cnt[CNT_INTRA]);

          vp8_clamp_mv2(&near_mvs[near_index], &pbi->mb);

          cnt[CNT_SPLITMV] =
              ((above->mbmi.mode == SPLITMV) + (left->mbmi.mode == SPLITMV)) *
                  2 +
              (aboveleft->mbmi.mode == SPLITMV);

          if (vp8_read(bc, vp8_mode_contexts[cnt[CNT_SPLITMV]][3])) {
            decode_split_mv(bc, mi, left, above, mbmi, near_mvs[near_index],
                            mvc, mb_to_left_edge, mb_to_right_edge,
                            mb_to_top_edge, mb_to_bottom_edge);
            mbmi->mv.as_int = mi->bmi[15].mv.as_int;
            mbmi->mode = SPLITMV;
            mbmi->is_4x4 = 1;
          } else {
            int_mv *const mbmi_mv = &mbmi->mv;
            read_mv(bc, &mbmi_mv->as_mv, (const MV_CONTEXT *)mvc);
            mbmi_mv->as_mv.row += near_mvs[near_index].as_mv.row;
            mbmi_mv->as_mv.col += near_mvs[near_index].as_mv.col;

            /* Don't need to check this on NEARMV and NEARESTMV
             * modes since those modes clamp the MV. The NEWMV mode
             * does not, so signal to the prediction stage whether
             * special handling may be required.
             */
            mbmi->need_to_clamp_mvs =
                vp8_check_mv_bounds(mbmi_mv, mb_to_left_edge, mb_to_right_edge,
                                    mb_to_top_edge, mb_to_bottom_edge);
            mbmi->mode = NEWMV;
          }
        } else {
          mbmi->mode = NEARMV;
          mbmi->mv.as_int = near_mvs[CNT_NEAR].as_int;
          vp8_clamp_mv2(&mbmi->mv, &pbi->mb);
        }
      } else {
        mbmi->mode = NEARESTMV;
        mbmi->mv.as_int = near_mvs[CNT_NEAREST].as_int;
        vp8_clamp_mv2(&mbmi->mv, &pbi->mb);
      }
    } else {
      mbmi->mode = ZEROMV;
      mbmi->mv.as_int = 0;
    }

#if CONFIG_ERROR_CONCEALMENT
    if (pbi->ec_enabled && (mbmi->mode != SPLITMV)) {
      mi->bmi[0].mv.as_int = mi->bmi[1].mv.as_int = mi->bmi[2].mv.as_int =
          mi->bmi[3].mv.as_int = mi->bmi[4].mv.as_int = mi->bmi[5].mv.as_int =
              mi->bmi[6].mv.as_int = mi->bmi[7].mv.as_int =
                  mi->bmi[8].mv.as_int = mi->bmi[9].mv.as_int =
                      mi->bmi[10].mv.as_int = mi->bmi[11].mv.as_int =
                          mi->bmi[12].mv.as_int = mi->bmi[13].mv.as_int =
                              mi->bmi[14].mv.as_int = mi->bmi[15].mv.as_int =
                                  mbmi->mv.as_int;
    }
#endif
  } else {
    /* required for left and above block mv */
    mbmi->mv.as_int = 0;

    /* MB is intra coded */
    if ((mbmi->mode = read_ymode(bc, pbi->common.fc.ymode_prob)) == B_PRED) {
      int j = 0;
      mbmi->is_4x4 = 1;
      do {
        mi->bmi[j].as_mode = read_bmode(bc, pbi->common.fc.bmode_prob);
      } while (++j < 16);
    }

    mbmi->uv_mode = read_uv_mode(bc, pbi->common.fc.uv_mode_prob);
  }
}

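/* Read the per-MB segment id (a two-level tree) when the segmentation map is
 * being updated this frame.
 */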
static void read_mb_features(vp8_reader *r, MB_MODE_INFO *mi, MACROBLOCKD *x) {
  /* Is segmentation enabled */
  if (x->segmentation_enabled && x->update_mb_segmentation_map) {
    /* If so then read the segment id. */
    if (vp8_read(r, x->mb_segment_tree_probs[0])) {
      mi->segment_id =
          (unsigned char)(2 + vp8_read(r, x->mb_segment_tree_probs[2]));
    } else {
      mi->segment_id =
          (unsigned char)(vp8_read(r, x->mb_segment_tree_probs[1]));
    }
  }
}

static void decode_mb_mode_mvs(VP8D_COMP *pbi, MODE_INFO *mi) {
  /* Read the Macroblock segmentation map if it is being updated explicitly
   * this frame (reset to 0 above by default)
   * By default on a key frame reset all MBs to segment 0
   */
  if (pbi->mb.update_mb_segmentation_map) {
    read_mb_features(&pbi->mbc[8], &mi->mbmi, &pbi->mb);
  } else if (pbi->common.frame_type == KEY_FRAME) {
    mi->mbmi.segment_id = 0;
  }

  /* Read the macroblock coeff skip flag if this feature is in use,
   * else default to 0 */
  if (pbi->common.mb_no_coeff_skip) {
    mi->mbmi.mb_skip_coeff = vp8_read(&pbi->mbc[8], pbi->prob_skip_false);
  } else {
    mi->mbmi.mb_skip_coeff = 0;
  }

  mi->mbmi.is_4x4 = 0;
  if (pbi->common.frame_type == KEY_FRAME) {
    read_kf_modes(pbi, mi);
  } else {
    read_mb_modes_mv(pbi, mi, &mi->mbmi);
  }
}

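/* Top-level mode/MV pass: read the frame-level probabilities, then walk the
 * MB grid once, decoding mode info for every macroblock while tracking the
 * distance to each frame edge in 1/8-pel units (hence the 16 << 3 step per
 * macroblock and per row).
 */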
void vp8_decode_mode_mvs(VP8D_COMP *pbi) {
  MODE_INFO *mi = pbi->common.mi;
  int mb_row = -1;
  int mb_to_right_edge_start;

  mb_mode_mv_init(pbi);

  pbi->mb.mb_to_top_edge = 0;
  pbi->mb.mb_to_bottom_edge = ((pbi->common.mb_rows - 1) * 16) << 3;
  mb_to_right_edge_start = ((pbi->common.mb_cols - 1) * 16) << 3;

  while (++mb_row < pbi->common.mb_rows) {
    int mb_col = -1;

    pbi->mb.mb_to_left_edge = 0;
    pbi->mb.mb_to_right_edge = mb_to_right_edge_start;

    while (++mb_col < pbi->common.mb_cols) {
#if CONFIG_ERROR_CONCEALMENT
      int mb_num = mb_row * pbi->common.mb_cols + mb_col;
#endif

      decode_mb_mode_mvs(pbi, mi);

#if CONFIG_ERROR_CONCEALMENT
      /* look for corruption. set mvs_corrupt_from_mb to the current
       * mb_num if the frame is corrupt from this macroblock. */
      if (vp8dx_bool_error(&pbi->mbc[8]) &&
          mb_num < (int)pbi->mvs_corrupt_from_mb) {
        pbi->mvs_corrupt_from_mb = mb_num;
        /* no need to continue since the partition is corrupt from
         * here on.
         */
        return;
      }
#endif

      pbi->mb.mb_to_left_edge -= (16 << 3);
      pbi->mb.mb_to_right_edge -= (16 << 3);
      mi++; /* next macroblock */
    }
    pbi->mb.mb_to_top_edge -= (16 << 3);
    pbi->mb.mb_to_bottom_edge -= (16 << 3);

    mi++; /* skip left predictor each row */
  }
}