1 /*
2  *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
3  *
4  *  Use of this source code is governed by a BSD-style license
5  *  that can be found in the LICENSE file in the root of the source
6  *  tree. An additional intellectual property rights grant can be found
7  *  in the file PATENTS.  All contributing project authors may
8  *  be found in the AUTHORS file in the root of the source tree.
9  */
10 
11 #ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_UNITTEST_H_
12 #define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_UNITTEST_H_
13 
14 #include <algorithm>
15 #include <vector>
16 
17 #include "webrtc/base/checks.h"
18 #include "webrtc/base/scoped_ptr.h"
19 #include "webrtc/common.h"
20 #include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
21 #include "webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h"
22 #include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
23 #include "webrtc/modules/video_coding/codecs/vp8/temporal_layers.h"
24 #include "webrtc/video_frame.h"
25 
26 #include "gtest/gtest.h"
27 
28 using ::testing::_;
29 using ::testing::AllOf;
30 using ::testing::Field;
31 using ::testing::Return;
32 
33 namespace webrtc {
34 namespace testing {
35 
// Default input resolution for the highest simulcast stream (720p).
const int kDefaultWidth = 1280;
const int kDefaultHeight = 720;
const int kNumberOfSimulcastStreams = 3;
// Constant fill values for the Y/U/V planes of the generated test frame.
const int kColorY = 66;
const int kColorU = 22;
const int kColorV = 33;
// Per-stream bitrate limits, lowest stream first (presumably kbps, matching
// the values later passed to SetRates() — confirm against VP8Encoder API).
const int kMaxBitrates[kNumberOfSimulcastStreams] = {150, 600, 1200};
const int kMinBitrates[kNumberOfSimulcastStreams] = {50, 150, 600};
const int kTargetBitrates[kNumberOfSimulcastStreams] = {100, 450, 1000};
// Three temporal layers on each of the three simulcast streams.
const int kDefaultTemporalLayerProfile[3] = {3, 3, 3};
46 
// Writes |value0|, |value1| and |value2| into the first three slots of
// |expected_values|, which must point to an array of at least three Ts.
template <typename T>
void SetExpectedValues3(T value0, T value1, T value2, T* expected_values) {
  T* out = expected_values;
  *out++ = value0;
  *out++ = value1;
  *out = value2;
}
53 
54 class Vp8TestEncodedImageCallback : public EncodedImageCallback {
55  public:
Vp8TestEncodedImageCallback()56   Vp8TestEncodedImageCallback() : picture_id_(-1) {
57     memset(temporal_layer_, -1, sizeof(temporal_layer_));
58     memset(layer_sync_, false, sizeof(layer_sync_));
59   }
60 
~Vp8TestEncodedImageCallback()61   ~Vp8TestEncodedImageCallback() {
62     delete[] encoded_key_frame_._buffer;
63     delete[] encoded_frame_._buffer;
64   }
65 
Encoded(const EncodedImage & encoded_image,const CodecSpecificInfo * codec_specific_info,const RTPFragmentationHeader * fragmentation)66   virtual int32_t Encoded(const EncodedImage& encoded_image,
67                           const CodecSpecificInfo* codec_specific_info,
68                           const RTPFragmentationHeader* fragmentation) {
69     // Only store the base layer.
70     if (codec_specific_info->codecSpecific.VP8.simulcastIdx == 0) {
71       if (encoded_image._frameType == kVideoFrameKey) {
72         delete[] encoded_key_frame_._buffer;
73         encoded_key_frame_._buffer = new uint8_t[encoded_image._size];
74         encoded_key_frame_._size = encoded_image._size;
75         encoded_key_frame_._length = encoded_image._length;
76         encoded_key_frame_._frameType = kVideoFrameKey;
77         encoded_key_frame_._completeFrame = encoded_image._completeFrame;
78         memcpy(encoded_key_frame_._buffer, encoded_image._buffer,
79                encoded_image._length);
80       } else {
81         delete[] encoded_frame_._buffer;
82         encoded_frame_._buffer = new uint8_t[encoded_image._size];
83         encoded_frame_._size = encoded_image._size;
84         encoded_frame_._length = encoded_image._length;
85         memcpy(encoded_frame_._buffer, encoded_image._buffer,
86                encoded_image._length);
87       }
88     }
89     picture_id_ = codec_specific_info->codecSpecific.VP8.pictureId;
90     layer_sync_[codec_specific_info->codecSpecific.VP8.simulcastIdx] =
91         codec_specific_info->codecSpecific.VP8.layerSync;
92     temporal_layer_[codec_specific_info->codecSpecific.VP8.simulcastIdx] =
93         codec_specific_info->codecSpecific.VP8.temporalIdx;
94     return 0;
95   }
GetLastEncodedFrameInfo(int * picture_id,int * temporal_layer,bool * layer_sync,int stream)96   void GetLastEncodedFrameInfo(int* picture_id,
97                                int* temporal_layer,
98                                bool* layer_sync,
99                                int stream) {
100     *picture_id = picture_id_;
101     *temporal_layer = temporal_layer_[stream];
102     *layer_sync = layer_sync_[stream];
103   }
GetLastEncodedKeyFrame(EncodedImage * encoded_key_frame)104   void GetLastEncodedKeyFrame(EncodedImage* encoded_key_frame) {
105     *encoded_key_frame = encoded_key_frame_;
106   }
GetLastEncodedFrame(EncodedImage * encoded_frame)107   void GetLastEncodedFrame(EncodedImage* encoded_frame) {
108     *encoded_frame = encoded_frame_;
109   }
110 
111  private:
112   EncodedImage encoded_key_frame_;
113   EncodedImage encoded_frame_;
114   int picture_id_;
115   int temporal_layer_[kNumberOfSimulcastStreams];
116   bool layer_sync_[kNumberOfSimulcastStreams];
117 };
118 
119 class Vp8TestDecodedImageCallback : public DecodedImageCallback {
120  public:
Vp8TestDecodedImageCallback()121   Vp8TestDecodedImageCallback() : decoded_frames_(0) {}
Decoded(VideoFrame & decoded_image)122   int32_t Decoded(VideoFrame& decoded_image) override {
123     for (int i = 0; i < decoded_image.width(); ++i) {
124       EXPECT_NEAR(kColorY, decoded_image.buffer(kYPlane)[i], 1);
125     }
126 
127     // TODO(mikhal): Verify the difference between U,V and the original.
128     for (int i = 0; i < ((decoded_image.width() + 1) / 2); ++i) {
129       EXPECT_NEAR(kColorU, decoded_image.buffer(kUPlane)[i], 4);
130       EXPECT_NEAR(kColorV, decoded_image.buffer(kVPlane)[i], 4);
131     }
132     decoded_frames_++;
133     return 0;
134   }
Decoded(VideoFrame & decoded_image,int64_t decode_time_ms)135   int32_t Decoded(VideoFrame& decoded_image, int64_t decode_time_ms) override {
136     RTC_NOTREACHED();
137     return -1;
138   }
DecodedFrames()139   int DecodedFrames() { return decoded_frames_; }
140 
141  private:
142   int decoded_frames_;
143 };
144 
// Injects a spying TemporalLayers factory into a VP8 encoder and reports
// the bitrate each created temporal-layers instance was configured with,
// so tests can check which simulcast streams were actually configured for
// a given target bitrate.
class SkipEncodingUnusedStreamsTest {
 public:
  // Initializes |encoder| with |settings| (augmented with the spy factory),
  // applies |target_bitrate|, and returns the bitrate that was passed to
  // ConfigureBitrates() on every TemporalLayers instance the encoder made.
  std::vector<unsigned int> RunTest(VP8Encoder* encoder,
                                    VideoCodec* settings,
                                    uint32_t target_bitrate) {
    Config options;
    SpyingTemporalLayersFactory* spy_factory =
        new SpyingTemporalLayersFactory();
    // NOTE(review): presumably |options| takes ownership of |spy_factory|
    // via Config::Set — confirm. Also note |options| is stack-local while
    // |settings->extra_options| still points at it after this returns.
    options.Set<TemporalLayers::Factory>(spy_factory);
    settings->extra_options = &options;
    EXPECT_EQ(0, encoder->InitEncode(settings, 1, 1200));

    encoder->SetRates(target_bitrate, 30);

    // Collect the bitrate each spied instance saw in ConfigureBitrates().
    std::vector<unsigned int> configured_bitrates;
    for (std::vector<TemporalLayers*>::const_iterator it =
             spy_factory->spying_layers_.begin();
         it != spy_factory->spying_layers_.end(); ++it) {
      configured_bitrates.push_back(
          static_cast<SpyingTemporalLayers*>(*it)->configured_bitrate_);
    }
    return configured_bitrates;
  }

  // Wraps a real TemporalLayers instance: records the bitrate passed to
  // ConfigureBitrates() and forwards all other calls. Owns |layers_|.
  class SpyingTemporalLayers : public TemporalLayers {
   public:
    explicit SpyingTemporalLayers(TemporalLayers* layers)
        : configured_bitrate_(0), layers_(layers) {}

    virtual ~SpyingTemporalLayers() { delete layers_; }

    virtual int EncodeFlags(uint32_t timestamp) {
      return layers_->EncodeFlags(timestamp);
    }

    bool ConfigureBitrates(int bitrate_kbit,
                           int max_bitrate_kbit,
                           int framerate,
                           vpx_codec_enc_cfg_t* cfg) override {
      // Remember the last configured bitrate, then delegate.
      configured_bitrate_ = bitrate_kbit;
      return layers_->ConfigureBitrates(bitrate_kbit, max_bitrate_kbit,
                                        framerate, cfg);
    }

    void PopulateCodecSpecific(bool base_layer_sync,
                               CodecSpecificInfoVP8* vp8_info,
                               uint32_t timestamp) override {
      layers_->PopulateCodecSpecific(base_layer_sync, vp8_info, timestamp);
    }

    void FrameEncoded(unsigned int size, uint32_t timestamp, int qp) override {
      layers_->FrameEncoded(size, timestamp, qp);
    }

    int CurrentLayerId() const override { return layers_->CurrentLayerId(); }

    // Deliberately NOT forwarded: always reports "no reconfiguration
    // needed" to the encoder.
    bool UpdateConfiguration(vpx_codec_enc_cfg_t* cfg) override {
      return false;
    }

    int configured_bitrate_;
    TemporalLayers* layers_;
  };

  // Factory that hands out SpyingTemporalLayers and keeps non-owning
  // pointers to every instance it created.
  class SpyingTemporalLayersFactory : public TemporalLayers::Factory {
   public:
    virtual ~SpyingTemporalLayersFactory() {}
    TemporalLayers* Create(int temporal_layers,
                           uint8_t initial_tl0_pic_idx) const override {
      SpyingTemporalLayers* layers =
          new SpyingTemporalLayers(TemporalLayers::Factory::Create(
              temporal_layers, initial_tl0_pic_idx));
      spying_layers_.push_back(layers);
      return layers;
    }

    // mutable so the const Create() override can record instances.
    mutable std::vector<TemporalLayers*> spying_layers_;
  };
};
224 
225 class TestVp8Simulcast : public ::testing::Test {
226  public:
  // Builds the fixture around the encoder/decoder pair under test.
  // NOTE(review): raw pointers are stored; ownership/cleanup is not visible
  // in this chunk — confirm where encoder_/decoder_ are deleted.
  TestVp8Simulcast(VP8Encoder* encoder, VP8Decoder* decoder)
      : encoder_(encoder), decoder_(decoder) {}
229 
230   // Creates an VideoFrame from |plane_colors|.
CreateImage(VideoFrame * frame,int plane_colors[kNumOfPlanes])231   static void CreateImage(VideoFrame* frame, int plane_colors[kNumOfPlanes]) {
232     for (int plane_num = 0; plane_num < kNumOfPlanes; ++plane_num) {
233       int width =
234           (plane_num != kYPlane ? (frame->width() + 1) / 2 : frame->width());
235       int height =
236           (plane_num != kYPlane ? (frame->height() + 1) / 2 : frame->height());
237       PlaneType plane_type = static_cast<PlaneType>(plane_num);
238       uint8_t* data = frame->buffer(plane_type);
239       // Setting allocated area to zero - setting only image size to
240       // requested values - will make it easier to distinguish between image
241       // size and frame size (accounting for stride).
242       memset(frame->buffer(plane_type), 0, frame->allocated_size(plane_type));
243       for (int i = 0; i < height; i++) {
244         memset(data, plane_colors[plane_num], width);
245         data += frame->stride(plane_type);
246       }
247     }
248   }
249 
  // Fills |settings| with the default three-stream VP8 simulcast
  // configuration: 1280x720 with streams at 1/4, 1/2 and full resolution,
  // using the bitrate constants defined at the top of this file and the
  // given per-stream |temporal_layer_profile|.
  static void DefaultSettings(VideoCodec* settings,
                              const int* temporal_layer_profile) {
    assert(settings);
    memset(settings, 0, sizeof(VideoCodec));
    strncpy(settings->plName, "VP8", 4);
    settings->codecType = kVideoCodecVP8;
    // 96 to 127 dynamic payload types for video codecs
    settings->plType = 120;
    settings->startBitrate = 300;
    settings->minBitrate = 30;
    settings->maxBitrate = 0;
    settings->maxFramerate = 30;
    settings->width = kDefaultWidth;
    settings->height = kDefaultHeight;
    settings->numberOfSimulcastStreams = kNumberOfSimulcastStreams;
    // The three explicit ConfigureStream() calls below assume exactly
    // three streams.
    ASSERT_EQ(3, kNumberOfSimulcastStreams);
    ConfigureStream(kDefaultWidth / 4, kDefaultHeight / 4, kMaxBitrates[0],
                    kMinBitrates[0], kTargetBitrates[0],
                    &settings->simulcastStream[0], temporal_layer_profile[0]);
    ConfigureStream(kDefaultWidth / 2, kDefaultHeight / 2, kMaxBitrates[1],
                    kMinBitrates[1], kTargetBitrates[1],
                    &settings->simulcastStream[1], temporal_layer_profile[1]);
    ConfigureStream(kDefaultWidth, kDefaultHeight, kMaxBitrates[2],
                    kMinBitrates[2], kTargetBitrates[2],
                    &settings->simulcastStream[2], temporal_layer_profile[2]);
    settings->codecSpecific.VP8.resilience = kResilientStream;
    settings->codecSpecific.VP8.denoisingOn = true;
    settings->codecSpecific.VP8.errorConcealmentOn = false;
    settings->codecSpecific.VP8.automaticResizeOn = false;
    settings->codecSpecific.VP8.feedbackModeOn = false;
    settings->codecSpecific.VP8.frameDroppingOn = true;
    settings->codecSpecific.VP8.keyFrameInterval = 3000;
  }
283 
ConfigureStream(int width,int height,int max_bitrate,int min_bitrate,int target_bitrate,SimulcastStream * stream,int num_temporal_layers)284   static void ConfigureStream(int width,
285                               int height,
286                               int max_bitrate,
287                               int min_bitrate,
288                               int target_bitrate,
289                               SimulcastStream* stream,
290                               int num_temporal_layers) {
291     assert(stream);
292     stream->width = width;
293     stream->height = height;
294     stream->maxBitrate = max_bitrate;
295     stream->minBitrate = min_bitrate;
296     stream->targetBitrate = target_bitrate;
297     stream->numberOfTemporalLayers = num_temporal_layers;
298     stream->qpMax = 45;
299   }
300 
301  protected:
SetUp()302   virtual void SetUp() { SetUpCodec(kDefaultTemporalLayerProfile); }
303 
  // Registers the fixture callbacks, initializes encoder and decoder with
  // the default settings (using |temporal_layer_profile|), and allocates a
  // zeroed 1280x720 I420 input frame for the tests to encode.
  virtual void SetUpCodec(const int* temporal_layer_profile) {
    encoder_->RegisterEncodeCompleteCallback(&encoder_callback_);
    decoder_->RegisterDecodeCompleteCallback(&decoder_callback_);
    DefaultSettings(&settings_, temporal_layer_profile);
    EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
    EXPECT_EQ(0, decoder_->InitDecode(&settings_, 1));
    // I420: chroma planes are half the luma width (rounded up).
    int half_width = (kDefaultWidth + 1) / 2;
    input_frame_.CreateEmptyFrame(kDefaultWidth, kDefaultHeight, kDefaultWidth,
                                  half_width, half_width);
    // Zero all planes so tests start from deterministic frame content.
    memset(input_frame_.buffer(kYPlane), 0,
           input_frame_.allocated_size(kYPlane));
    memset(input_frame_.buffer(kUPlane), 0,
           input_frame_.allocated_size(kUPlane));
    memset(input_frame_.buffer(kVPlane), 0,
           input_frame_.allocated_size(kVPlane));
  }
320 
  // gtest fixture hook: releases codec resources after each test.
  virtual void TearDown() {
    encoder_->Release();
    decoder_->Release();
  }
325 
  // Arms gmock expectations on |encoder_callback_| for the next Encode():
  // exactly one encoded image of |frame_type| per expected stream, at 1/4,
  // 1/2 and full default resolution respectively. Must be called BEFORE the
  // Encode() whose output it checks.
  void ExpectStreams(FrameType frame_type, int expected_video_streams) {
    ASSERT_GE(expected_video_streams, 0);
    ASSERT_LE(expected_video_streams, kNumberOfSimulcastStreams);
    if (expected_video_streams >= 1) {
      // Lowest stream: quarter resolution.
      EXPECT_CALL(
          encoder_callback_,
          Encoded(
              AllOf(Field(&EncodedImage::_frameType, frame_type),
                    Field(&EncodedImage::_encodedWidth, kDefaultWidth / 4),
                    Field(&EncodedImage::_encodedHeight, kDefaultHeight / 4)),
              _, _))
          .Times(1)
          .WillRepeatedly(Return(0));
    }
    if (expected_video_streams >= 2) {
      // Middle stream: half resolution.
      EXPECT_CALL(
          encoder_callback_,
          Encoded(
              AllOf(Field(&EncodedImage::_frameType, frame_type),
                    Field(&EncodedImage::_encodedWidth, kDefaultWidth / 2),
                    Field(&EncodedImage::_encodedHeight, kDefaultHeight / 2)),
              _, _))
          .Times(1)
          .WillRepeatedly(Return(0));
    }
    if (expected_video_streams >= 3) {
      // Highest stream: full resolution.
      EXPECT_CALL(
          encoder_callback_,
          Encoded(AllOf(Field(&EncodedImage::_frameType, frame_type),
                        Field(&EncodedImage::_encodedWidth, kDefaultWidth),
                        Field(&EncodedImage::_encodedHeight, kDefaultHeight)),
                  _, _))
          .Times(1)
          .WillRepeatedly(Return(0));
    }
  }
362 
VerifyTemporalIdxAndSyncForAllSpatialLayers(Vp8TestEncodedImageCallback * encoder_callback,const int * expected_temporal_idx,const bool * expected_layer_sync,int num_spatial_layers)363   void VerifyTemporalIdxAndSyncForAllSpatialLayers(
364       Vp8TestEncodedImageCallback* encoder_callback,
365       const int* expected_temporal_idx,
366       const bool* expected_layer_sync,
367       int num_spatial_layers) {
368     int picture_id = -1;
369     int temporal_layer = -1;
370     bool layer_sync = false;
371     for (int i = 0; i < num_spatial_layers; i++) {
372       encoder_callback->GetLastEncodedFrameInfo(&picture_id, &temporal_layer,
373                                                 &layer_sync, i);
374       EXPECT_EQ(expected_temporal_idx[i], temporal_layer);
375       EXPECT_EQ(expected_layer_sync[i], layer_sync);
376     }
377   }
378 
  // We currently expect all active streams to generate a key frame even though
  // a key frame was only requested for some of them.
  void TestKeyFrameRequestsOnAllStreams() {
    encoder_->SetRates(kMaxBitrates[2], 30);  // To get all three streams.
    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                       kVideoFrameDelta);
    // First frame: key frames on all streams.
    ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

    // Steady state: delta frames on all streams. Timestamp advances by 3000
    // ticks per frame (presumably 90 kHz RTP clock at 30 fps — confirm).
    ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams);
    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

    // Requesting a key frame on stream 0 alone yields key frames everywhere.
    frame_types[0] = kVideoFrameKey;
    ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

    // Same for a request on stream 1 only...
    std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
    frame_types[1] = kVideoFrameKey;
    ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

    // ...and on stream 2 only.
    std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
    frame_types[2] = kVideoFrameKey;
    ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

    // With all-delta requests again, no further key frames are produced.
    std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
    ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams);
    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
  }
414 
  // With the rate below even the lowest stream's minimum, only the base
  // stream is encoded (padding would cover the other two).
  void TestPaddingAllStreams() {
    // We should always encode the base layer.
    encoder_->SetRates(kMinBitrates[0] - 1, 30);
    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                       kVideoFrameDelta);
    ExpectStreams(kVideoFrameKey, 1);
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

    ExpectStreams(kVideoFrameDelta, 1);
    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
  }
427 
  // Exactly the lowest stream's minimum rate: base stream only.
  void TestPaddingTwoStreams() {
    // We have just enough to get only the first stream and padding for two.
    encoder_->SetRates(kMinBitrates[0], 30);
    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                       kVideoFrameDelta);
    ExpectStreams(kVideoFrameKey, 1);
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

    ExpectStreams(kVideoFrameDelta, 1);
    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
  }
440 
  // One unit below the threshold that would enable the second stream.
  void TestPaddingTwoStreamsOneMaxedOut() {
    // We are just below limit of sending second stream, so we should get
    // the first stream maxed out (at |maxBitrate|), and padding for two.
    encoder_->SetRates(kTargetBitrates[0] + kMinBitrates[1] - 1, 30);
    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                       kVideoFrameDelta);
    ExpectStreams(kVideoFrameKey, 1);
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

    ExpectStreams(kVideoFrameDelta, 1);
    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
  }
454 
  // Just enough rate for two streams: the third is padded.
  void TestPaddingOneStream() {
    // We have just enough to send two streams, so padding for one stream.
    encoder_->SetRates(kTargetBitrates[0] + kMinBitrates[1], 30);
    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                       kVideoFrameDelta);
    ExpectStreams(kVideoFrameKey, 2);
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

    ExpectStreams(kVideoFrameDelta, 2);
    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
  }
467 
  // One unit below the threshold that would enable the third stream.
  void TestPaddingOneStreamTwoMaxedOut() {
    // We are just below limit of sending third stream, so we should get
    // first stream's rate maxed out at |targetBitrate|, second at |maxBitrate|.
    encoder_->SetRates(
        kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] - 1, 30);
    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                       kVideoFrameDelta);
    ExpectStreams(kVideoFrameKey, 2);
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

    ExpectStreams(kVideoFrameDelta, 2);
    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
  }
482 
  // Minimum rate at which all three streams are active.
  void TestSendAllStreams() {
    // We have just enough to send all streams.
    encoder_->SetRates(
        kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2], 30);
    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                       kVideoFrameDelta);
    ExpectStreams(kVideoFrameKey, 3);
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

    ExpectStreams(kVideoFrameDelta, 3);
    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
  }
496 
  // Steps the allocated rate down and back up, verifying that streams are
  // disabled as bitrate drops and re-enabled (with a key frame) as it rises.
  void TestDisablingStreams() {
    // We should get three media streams.
    encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);
    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                       kVideoFrameDelta);
    ExpectStreams(kVideoFrameKey, 3);
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

    ExpectStreams(kVideoFrameDelta, 3);
    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

    // We should only get two streams and padding for one.
    encoder_->SetRates(
        kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
    ExpectStreams(kVideoFrameDelta, 2);
    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

    // We should only get the first stream and padding for two.
    encoder_->SetRates(kTargetBitrates[0] + kMinBitrates[1] / 2, 30);
    ExpectStreams(kVideoFrameDelta, 1);
    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

    // We don't have enough bitrate for the thumbnail stream, but we should get
    // it anyway with current configuration.
    encoder_->SetRates(kTargetBitrates[0] - 1, 30);
    ExpectStreams(kVideoFrameDelta, 1);
    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

    // We should only get two streams and padding for one.
    encoder_->SetRates(
        kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
    // We get a key frame because a new stream is being enabled.
    ExpectStreams(kVideoFrameKey, 2);
    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

    // We should get all three streams.
    encoder_->SetRates(
        kTargetBitrates[0] + kTargetBitrates[1] + kTargetBitrates[2], 30);
    // We get a key frame because a new stream is being enabled.
    ExpectStreams(kVideoFrameKey, 3);
    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
  }
545 
  // Reconfigures the encoder so only the highest stream is active at
  // |width|x|height| (lower streams get maxBitrate 0), verifies a single
  // key frame at that resolution, then switches back to the default
  // three-stream configuration and verifies the base stream re-encodes.
  void SwitchingToOneStream(int width, int height) {
    // Disable all streams except the last and set the bitrate of the last to
    // 100 kbps. This verifies the way GTP switches to screenshare mode.
    settings_.codecSpecific.VP8.numberOfTemporalLayers = 1;
    settings_.maxBitrate = 100;
    settings_.startBitrate = 100;
    settings_.width = width;
    settings_.height = height;
    for (int i = 0; i < settings_.numberOfSimulcastStreams - 1; ++i) {
      settings_.simulcastStream[i].maxBitrate = 0;
      settings_.simulcastStream[i].width = settings_.width;
      settings_.simulcastStream[i].height = settings_.height;
    }
    // Setting input image to new resolution.
    int half_width = (settings_.width + 1) / 2;
    input_frame_.CreateEmptyFrame(settings_.width, settings_.height,
                                  settings_.width, half_width, half_width);
    memset(input_frame_.buffer(kYPlane), 0,
           input_frame_.allocated_size(kYPlane));
    memset(input_frame_.buffer(kUPlane), 0,
           input_frame_.allocated_size(kUPlane));
    memset(input_frame_.buffer(kVPlane), 0,
           input_frame_.allocated_size(kVPlane));

    // The for loop above did not set the bitrate of the highest layer.
    settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1]
        .maxBitrate = 0;
    // The highest layer has to correspond to the non-simulcast resolution.
    settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].width =
        settings_.width;
    settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].height =
        settings_.height;
    EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));

    // Encode one frame and verify.
    encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1], 30);
    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                       kVideoFrameDelta);
    // Expect exactly one key frame at the full (non-simulcast) resolution.
    EXPECT_CALL(encoder_callback_,
                Encoded(AllOf(Field(&EncodedImage::_frameType, kVideoFrameKey),
                              Field(&EncodedImage::_encodedWidth, width),
                              Field(&EncodedImage::_encodedHeight, height)),
                        _, _))
        .Times(1)
        .WillRepeatedly(Return(0));
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

    // Switch back.
    DefaultSettings(&settings_, kDefaultTemporalLayerProfile);
    // Start at the lowest bitrate for enabling base stream.
    settings_.startBitrate = kMinBitrates[0];
    EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
    encoder_->SetRates(settings_.startBitrate, 30);
    ExpectStreams(kVideoFrameKey, 1);
    // Resize |input_frame_| to the new resolution.
    half_width = (settings_.width + 1) / 2;
    input_frame_.CreateEmptyFrame(settings_.width, settings_.height,
                                  settings_.width, half_width, half_width);
    memset(input_frame_.buffer(kYPlane), 0,
           input_frame_.allocated_size(kYPlane));
    memset(input_frame_.buffer(kUPlane), 0,
           input_frame_.allocated_size(kUPlane));
    memset(input_frame_.buffer(kVPlane), 0,
           input_frame_.allocated_size(kVPlane));
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
  }
612 
TestSwitchingToOneStream()613   void TestSwitchingToOneStream() { SwitchingToOneStream(1024, 768); }
614 
TestSwitchingToOneOddStream()615   void TestSwitchingToOneOddStream() { SwitchingToOneStream(1023, 769); }
616 
  // Verifies the encoder's reaction to RPSI feedback: a matching
  // |pictureIdRPSI| makes the next frame a base-layer (temporal index 0)
  // sync frame, while a non-matching id leaves the temporal pattern alone.
  void TestRPSIEncoder() {
    Vp8TestEncodedImageCallback encoder_callback;
    encoder_->RegisterEncodeCompleteCallback(&encoder_callback);

    encoder_->SetRates(kMaxBitrates[2], 30);  // To get all three streams.

    // Frame 1: expected temporal index 0 (key frame), layer sync set.
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
    int picture_id = -1;
    int temporal_layer = -1;
    bool layer_sync = false;
    encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer,
                                             &layer_sync, 0);
    EXPECT_EQ(0, temporal_layer);
    EXPECT_TRUE(layer_sync);
    int key_frame_picture_id = picture_id;

    // Frames 2-4 follow the 3-temporal-layer pattern asserted below: 2, 1, 2,
    // with layer sync only while each layer still predicts from the key frame.
    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
    encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer,
                                             &layer_sync, 0);
    EXPECT_EQ(2, temporal_layer);
    EXPECT_TRUE(layer_sync);

    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
    encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer,
                                             &layer_sync, 0);
    EXPECT_EQ(1, temporal_layer);
    EXPECT_TRUE(layer_sync);

    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
    encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer,
                                             &layer_sync, 0);
    EXPECT_EQ(2, temporal_layer);
    EXPECT_FALSE(layer_sync);

    // Feed back an RPSI acknowledging the original key frame.
    CodecSpecificInfo codec_specific;
    codec_specific.codecType = kVideoCodecVP8;
    codec_specific.codecSpecific.VP8.hasReceivedRPSI = true;

    // Must match last key frame to trigger.
    codec_specific.codecSpecific.VP8.pictureIdRPSI = key_frame_picture_id;

    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(input_frame_, &codec_specific, NULL));
    encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer,
                                             &layer_sync, 0);

    // A matching RPSI resets to a base-layer sync frame.
    EXPECT_EQ(0, temporal_layer);
    EXPECT_TRUE(layer_sync);

    // Must match last key frame to trigger, test bad id.
    codec_specific.codecSpecific.VP8.pictureIdRPSI = key_frame_picture_id + 17;

    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(input_frame_, &codec_specific, NULL));
    encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer,
                                             &layer_sync, 0);

    EXPECT_EQ(2, temporal_layer);
    // The previous frame was a base layer sync (since it was a frame that
    // only predicts from key frame and hence resets the temporal pattern),
    // so this frame (the next one) must have |layer_sync| set to true.
    EXPECT_TRUE(layer_sync);
  }
683 
TestRPSIEncodeDecode()684   void TestRPSIEncodeDecode() {
685     Vp8TestEncodedImageCallback encoder_callback;
686     Vp8TestDecodedImageCallback decoder_callback;
687     encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
688     decoder_->RegisterDecodeCompleteCallback(&decoder_callback);
689 
690     encoder_->SetRates(kMaxBitrates[2], 30);  // To get all three streams.
691 
692     // Set color.
693     int plane_offset[kNumOfPlanes];
694     plane_offset[kYPlane] = kColorY;
695     plane_offset[kUPlane] = kColorU;
696     plane_offset[kVPlane] = kColorV;
697     CreateImage(&input_frame_, plane_offset);
698 
699     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
700     int picture_id = -1;
701     int temporal_layer = -1;
702     bool layer_sync = false;
703     encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer,
704                                              &layer_sync, 0);
705     EXPECT_EQ(0, temporal_layer);
706     EXPECT_TRUE(layer_sync);
707     int key_frame_picture_id = picture_id;
708 
709     // Change color.
710     plane_offset[kYPlane] += 1;
711     plane_offset[kUPlane] += 1;
712     plane_offset[kVPlane] += 1;
713     CreateImage(&input_frame_, plane_offset);
714     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
715     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
716 
717     // Change color.
718     plane_offset[kYPlane] += 1;
719     plane_offset[kUPlane] += 1;
720     plane_offset[kVPlane] += 1;
721     CreateImage(&input_frame_, plane_offset);
722 
723     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
724     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
725 
726     // Change color.
727     plane_offset[kYPlane] += 1;
728     plane_offset[kUPlane] += 1;
729     plane_offset[kVPlane] += 1;
730     CreateImage(&input_frame_, plane_offset);
731 
732     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
733     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
734 
735     CodecSpecificInfo codec_specific;
736     codec_specific.codecType = kVideoCodecVP8;
737     codec_specific.codecSpecific.VP8.hasReceivedRPSI = true;
738     // Must match last key frame to trigger.
739     codec_specific.codecSpecific.VP8.pictureIdRPSI = key_frame_picture_id;
740 
741     // Change color back to original.
742     plane_offset[kYPlane] = kColorY;
743     plane_offset[kUPlane] = kColorU;
744     plane_offset[kVPlane] = kColorV;
745     CreateImage(&input_frame_, plane_offset);
746 
747     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
748     EXPECT_EQ(0, encoder_->Encode(input_frame_, &codec_specific, NULL));
749 
750     EncodedImage encoded_frame;
751     encoder_callback.GetLastEncodedKeyFrame(&encoded_frame);
752     decoder_->Decode(encoded_frame, false, NULL);
753     encoder_callback.GetLastEncodedFrame(&encoded_frame);
754     decoder_->Decode(encoded_frame, false, NULL);
755     EXPECT_EQ(2, decoder_callback.DecodedFrames());
756   }
757 
758   // Test the layer pattern and sync flag for various spatial-temporal patterns.
759   // 3-3-3 pattern: 3 temporal layers for all spatial streams, so same
760   // temporal_layer id and layer_sync is expected for all streams.
TestSaptioTemporalLayers333PatternEncoder()761   void TestSaptioTemporalLayers333PatternEncoder() {
762     Vp8TestEncodedImageCallback encoder_callback;
763     encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
764     encoder_->SetRates(kMaxBitrates[2], 30);  // To get all three streams.
765 
766     int expected_temporal_idx[3] = {-1, -1, -1};
767     bool expected_layer_sync[3] = {false, false, false};
768 
769     // First frame: #0.
770     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
771     SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
772     SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
773     VerifyTemporalIdxAndSyncForAllSpatialLayers(
774         &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
775 
776     // Next frame: #1.
777     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
778     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
779     SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
780     SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
781     VerifyTemporalIdxAndSyncForAllSpatialLayers(
782         &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
783 
784     // Next frame: #2.
785     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
786     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
787     SetExpectedValues3<int>(1, 1, 1, expected_temporal_idx);
788     SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
789     VerifyTemporalIdxAndSyncForAllSpatialLayers(
790         &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
791 
792     // Next frame: #3.
793     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
794     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
795     SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
796     SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
797     VerifyTemporalIdxAndSyncForAllSpatialLayers(
798         &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
799 
800     // Next frame: #4.
801     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
802     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
803     SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
804     SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
805     VerifyTemporalIdxAndSyncForAllSpatialLayers(
806         &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
807 
808     // Next frame: #5.
809     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
810     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
811     SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
812     SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
813     VerifyTemporalIdxAndSyncForAllSpatialLayers(
814         &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
815   }
816 
817   // Test the layer pattern and sync flag for various spatial-temporal patterns.
818   // 3-2-1 pattern: 3 temporal layers for lowest resolution, 2 for middle, and
819   // 1 temporal layer for highest resolution.
820   // For this profile, we expect the temporal index pattern to be:
821   // 1st stream: 0, 2, 1, 2, ....
822   // 2nd stream: 0, 1, 0, 1, ...
823   // 3rd stream: -1, -1, -1, -1, ....
824   // Regarding the 3rd stream, note that a stream/encoder with 1 temporal layer
825   // should always have temporal layer idx set to kNoTemporalIdx = -1.
826   // Since CodecSpecificInfoVP8.temporalIdx is uint8_t, this will wrap to 255.
827   // TODO(marpan): Although this seems safe for now, we should fix this.
TestSpatioTemporalLayers321PatternEncoder()828   void TestSpatioTemporalLayers321PatternEncoder() {
829     int temporal_layer_profile[3] = {3, 2, 1};
830     SetUpCodec(temporal_layer_profile);
831     Vp8TestEncodedImageCallback encoder_callback;
832     encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
833     encoder_->SetRates(kMaxBitrates[2], 30);  // To get all three streams.
834 
835     int expected_temporal_idx[3] = {-1, -1, -1};
836     bool expected_layer_sync[3] = {false, false, false};
837 
838     // First frame: #0.
839     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
840     SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
841     SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
842     VerifyTemporalIdxAndSyncForAllSpatialLayers(
843         &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
844 
845     // Next frame: #1.
846     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
847     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
848     SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
849     SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
850     VerifyTemporalIdxAndSyncForAllSpatialLayers(
851         &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
852 
853     // Next frame: #2.
854     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
855     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
856     SetExpectedValues3<int>(1, 0, 255, expected_temporal_idx);
857     SetExpectedValues3<bool>(true, false, false, expected_layer_sync);
858     VerifyTemporalIdxAndSyncForAllSpatialLayers(
859         &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
860 
861     // Next frame: #3.
862     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
863     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
864     SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
865     SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
866     VerifyTemporalIdxAndSyncForAllSpatialLayers(
867         &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
868 
869     // Next frame: #4.
870     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
871     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
872     SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
873     SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
874     VerifyTemporalIdxAndSyncForAllSpatialLayers(
875         &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
876 
877     // Next frame: #5.
878     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
879     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
880     SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
881     SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
882     VerifyTemporalIdxAndSyncForAllSpatialLayers(
883         &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
884   }
885 
TestStrideEncodeDecode()886   void TestStrideEncodeDecode() {
887     Vp8TestEncodedImageCallback encoder_callback;
888     Vp8TestDecodedImageCallback decoder_callback;
889     encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
890     decoder_->RegisterDecodeCompleteCallback(&decoder_callback);
891 
892     encoder_->SetRates(kMaxBitrates[2], 30);  // To get all three streams.
893     // Setting two (possibly) problematic use cases for stride:
894     // 1. stride > width 2. stride_y != stride_uv/2
895     int stride_y = kDefaultWidth + 20;
896     int stride_uv = ((kDefaultWidth + 1) / 2) + 5;
897     input_frame_.CreateEmptyFrame(kDefaultWidth, kDefaultHeight, stride_y,
898                                   stride_uv, stride_uv);
899     // Set color.
900     int plane_offset[kNumOfPlanes];
901     plane_offset[kYPlane] = kColorY;
902     plane_offset[kUPlane] = kColorU;
903     plane_offset[kVPlane] = kColorV;
904     CreateImage(&input_frame_, plane_offset);
905 
906     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
907 
908     // Change color.
909     plane_offset[kYPlane] += 1;
910     plane_offset[kUPlane] += 1;
911     plane_offset[kVPlane] += 1;
912     CreateImage(&input_frame_, plane_offset);
913     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
914     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
915 
916     EncodedImage encoded_frame;
917     // Only encoding one frame - so will be a key frame.
918     encoder_callback.GetLastEncodedKeyFrame(&encoded_frame);
919     EXPECT_EQ(0, decoder_->Decode(encoded_frame, false, NULL));
920     encoder_callback.GetLastEncodedFrame(&encoded_frame);
921     decoder_->Decode(encoded_frame, false, NULL);
922     EXPECT_EQ(2, decoder_callback.DecodedFrames());
923   }
924 
TestSkipEncodingUnusedStreams()925   void TestSkipEncodingUnusedStreams() {
926     SkipEncodingUnusedStreamsTest test;
927     std::vector<unsigned int> configured_bitrate =
928         test.RunTest(encoder_.get(), &settings_,
929                      1);  // Target bit rate 1, to force all streams but the
930                           // base one to be exceeding bandwidth constraints.
931     EXPECT_EQ(static_cast<size_t>(kNumberOfSimulcastStreams),
932               configured_bitrate.size());
933 
934     unsigned int min_bitrate =
935         std::max(settings_.simulcastStream[0].minBitrate, settings_.minBitrate);
936     int stream = 0;
937     for (std::vector<unsigned int>::const_iterator it =
938              configured_bitrate.begin();
939          it != configured_bitrate.end(); ++it) {
940       if (stream == 0) {
941         EXPECT_EQ(min_bitrate, *it);
942       } else {
943         EXPECT_EQ(0u, *it);
944       }
945       ++stream;
946     }
947   }
948 
  rtc::scoped_ptr<VP8Encoder> encoder_;        // Simulcast encoder under test.
  MockEncodedImageCallback encoder_callback_;  // Mock sink for encoded frames.
  rtc::scoped_ptr<VP8Decoder> decoder_;        // Decoder fed encoder output.
  MockDecodedImageCallback decoder_callback_;  // Mock sink for decoded frames.
  VideoCodec settings_;     // Codec configuration shared by all tests.
  VideoFrame input_frame_;  // Raw input frame, re-encoded with bumped
                            // timestamps between Encode() calls.
955 };
956 
957 }  // namespace testing
958 }  // namespace webrtc
959 
960 #endif  // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_UNITTEST_H_
961