1 /*
2 * Copyright 2017 The WebRTC Project Authors. All rights reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include "video/rtp_video_stream_receiver.h"
12
13 #include <memory>
14 #include <utility>
15
16 #include "api/video/video_codec_type.h"
17 #include "api/video/video_frame_type.h"
18 #include "common_video/h264/h264_common.h"
19 #include "media/base/media_constants.h"
20 #include "modules/rtp_rtcp/source/rtp_descriptor_authentication.h"
21 #include "modules/rtp_rtcp/source/rtp_format.h"
22 #include "modules/rtp_rtcp/source/rtp_format_vp9.h"
23 #include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h"
24 #include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h"
25 #include "modules/rtp_rtcp/source/rtp_header_extensions.h"
26 #include "modules/rtp_rtcp/source/rtp_packet_received.h"
27 #include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
28 #include "modules/utility/include/process_thread.h"
29 #include "modules/video_coding/frame_object.h"
30 #include "modules/video_coding/include/video_coding_defines.h"
31 #include "modules/video_coding/rtp_frame_reference_finder.h"
32 #include "rtc_base/byte_buffer.h"
33 #include "rtc_base/logging.h"
34 #include "system_wrappers/include/clock.h"
35 #include "system_wrappers/include/field_trial.h"
36 #include "test/field_trial.h"
37 #include "test/gmock.h"
38 #include "test/gtest.h"
39 #include "test/mock_frame_transformer.h"
40
41 using ::testing::_;
42 using ::testing::ElementsAre;
43 using ::testing::Invoke;
44 using ::testing::SizeIs;
45 using ::testing::Values;
46
47 namespace webrtc {
48
49 namespace {
50
// Annex B start code that the depacketizer prepends to every H264 NAL unit;
// tests append it to the expected bitstream before each NAL unit.
const uint8_t kH264StartCode[] = {0x00, 0x00, 0x00, 0x01};
52
GetAbsoluteCaptureTimestamps(const video_coding::EncodedFrame * frame)53 std::vector<uint64_t> GetAbsoluteCaptureTimestamps(
54 const video_coding::EncodedFrame* frame) {
55 std::vector<uint64_t> result;
56 for (const auto& packet_info : frame->PacketInfos()) {
57 if (packet_info.absolute_capture_time()) {
58 result.push_back(
59 packet_info.absolute_capture_time()->absolute_capture_timestamp);
60 }
61 }
62 return result;
63 }
64
GetGenericVideoHeader(VideoFrameType frame_type)65 RTPVideoHeader GetGenericVideoHeader(VideoFrameType frame_type) {
66 RTPVideoHeader video_header;
67 video_header.is_first_packet_in_frame = true;
68 video_header.is_last_packet_in_frame = true;
69 video_header.codec = kVideoCodecGeneric;
70 video_header.frame_type = frame_type;
71 return video_header;
72 }
73
// Mock outgoing transport; lets tests observe (or ignore) RTP/RTCP packets
// the receiver sends, e.g. NACKs and keyframe requests.
class MockTransport : public Transport {
 public:
  MOCK_METHOD(bool,
              SendRtp,
              (const uint8_t*, size_t length, const PacketOptions& options),
              (override));
  MOCK_METHOD(bool, SendRtcp, (const uint8_t*, size_t length), (override));
};
82
// Mock NACK sender; tests can verify which sequence numbers the receiver
// asks to be retransmitted.
class MockNackSender : public NackSender {
 public:
  MOCK_METHOD(void,
              SendNack,
              (const std::vector<uint16_t>& sequence_numbers,
               bool buffering_allowed),
              (override));
};
91
// Mock keyframe request sender; tests EXPECT_CALL on RequestKeyFrame() to
// verify the receiver asks for recovery when needed.
class MockKeyFrameRequestSender : public KeyFrameRequestSender {
 public:
  MOCK_METHOD(void, RequestKeyFrame, (), (override));
};
96
97 class MockOnCompleteFrameCallback
98 : public video_coding::OnCompleteFrameCallback {
99 public:
100 MOCK_METHOD(void, DoOnCompleteFrame, (video_coding::EncodedFrame*), ());
101 MOCK_METHOD(void,
102 DoOnCompleteFrameFailNullptr,
103 (video_coding::EncodedFrame*),
104 ());
105 MOCK_METHOD(void,
106 DoOnCompleteFrameFailLength,
107 (video_coding::EncodedFrame*),
108 ());
109 MOCK_METHOD(void,
110 DoOnCompleteFrameFailBitstream,
111 (video_coding::EncodedFrame*),
112 ());
OnCompleteFrame(std::unique_ptr<video_coding::EncodedFrame> frame)113 void OnCompleteFrame(
114 std::unique_ptr<video_coding::EncodedFrame> frame) override {
115 if (!frame) {
116 DoOnCompleteFrameFailNullptr(nullptr);
117 return;
118 }
119 EXPECT_EQ(buffer_.Length(), frame->size());
120 if (buffer_.Length() != frame->size()) {
121 DoOnCompleteFrameFailLength(frame.get());
122 return;
123 }
124 if (frame->size() != buffer_.Length() ||
125 memcmp(buffer_.Data(), frame->data(), buffer_.Length()) != 0) {
126 DoOnCompleteFrameFailBitstream(frame.get());
127 return;
128 }
129 DoOnCompleteFrame(frame.get());
130 }
131
ClearExpectedBitstream()132 void ClearExpectedBitstream() { buffer_.Clear(); }
133
AppendExpectedBitstream(const uint8_t data[],size_t size_in_bytes)134 void AppendExpectedBitstream(const uint8_t data[], size_t size_in_bytes) {
135 // TODO(Johan): Let rtc::ByteBuffer handle uint8_t* instead of char*.
136 buffer_.WriteBytes(reinterpret_cast<const char*>(data), size_in_bytes);
137 }
138 rtc::ByteBufferWriter buffer_;
139 };
140
// Mock secondary packet sink used by the Add/RemoveSecondarySink tests.
class MockRtpPacketSink : public RtpPacketSinkInterface {
 public:
  MOCK_METHOD(void, OnRtpPacket, (const RtpPacketReceived&), (override));
};
145
// Default RTP field values used by CreateRtpPacketReceived() and by the
// fixture's default generic receive codec.
constexpr uint32_t kSsrc = 111;
constexpr uint16_t kSequenceNumber = 222;
constexpr int kPayloadType = 100;
constexpr int kRedPayloadType = 125;
150
CreateRtpPacketReceived()151 std::unique_ptr<RtpPacketReceived> CreateRtpPacketReceived() {
152 auto packet = std::make_unique<RtpPacketReceived>();
153 packet->SetSsrc(kSsrc);
154 packet->SetSequenceNumber(kSequenceNumber);
155 packet->SetPayloadType(kPayloadType);
156 return packet;
157 }
158
// gMock matcher: two packets are "the same" if SSRC and sequence number
// match; all other header fields and the payload are ignored.
MATCHER_P(SamePacketAs, other, "") {
  return arg.Ssrc() == other.Ssrc() &&
         arg.SequenceNumber() == other.SequenceNumber();
}
163
164 } // namespace
165
// Fixture wiring a real RtpVideoStreamReceiver to mocked transport, NACK
// sender, keyframe request sender and complete-frame callback. A generic
// receive codec with kPayloadType is registered by default; tests may
// register additional codecs. The parameterized constructor lets derived
// fixtures inject a field-trial string.
class RtpVideoStreamReceiverTest : public ::testing::Test {
 public:
  RtpVideoStreamReceiverTest() : RtpVideoStreamReceiverTest("") {}
  explicit RtpVideoStreamReceiverTest(std::string field_trials)
      : override_field_trials_(field_trials),
        config_(CreateConfig()),
        process_thread_(ProcessThread::Create("TestThread")) {
    rtp_receive_statistics_ =
        ReceiveStatistics::Create(Clock::GetRealTimeClock());
    rtp_video_stream_receiver_ = std::make_unique<RtpVideoStreamReceiver>(
        Clock::GetRealTimeClock(), &mock_transport_, nullptr, nullptr, &config_,
        rtp_receive_statistics_.get(), nullptr, nullptr, process_thread_.get(),
        &mock_nack_sender_, &mock_key_frame_request_sender_,
        &mock_on_complete_frame_callback_, nullptr, nullptr);
    // Register a default generic codec so OnReceivedPayloadData() calls with
    // kPayloadType are accepted.
    VideoCodec codec;
    codec.plType = kPayloadType;
    codec.codecType = kVideoCodecGeneric;
    rtp_video_stream_receiver_->AddReceiveCodec(codec, {},
                                                /*raw_payload=*/false);
  }

  // Returns an H264 header with an empty RTPVideoHeaderH264 payload-specific
  // part; callers fill in NAL unit info via AddSps/AddPps/AddIdr.
  RTPVideoHeader GetDefaultH264VideoHeader() {
    RTPVideoHeader video_header;
    video_header.codec = kVideoCodecH264;
    video_header.video_type_header.emplace<RTPVideoHeaderH264>();
    return video_header;
  }

  // TODO(Johan): refactor h264_sps_pps_tracker_unittests.cc to avoid duplicate
  // code.
  // Appends a minimal SPS NAL unit (two bytes) to |data| and records it in
  // the header's NALU list.
  void AddSps(RTPVideoHeader* video_header,
              uint8_t sps_id,
              rtc::CopyOnWriteBuffer* data) {
    NaluInfo info;
    info.type = H264::NaluType::kSps;
    info.sps_id = sps_id;
    info.pps_id = -1;
    data->AppendData({H264::NaluType::kSps, sps_id});
    auto& h264 = absl::get<RTPVideoHeaderH264>(video_header->video_type_header);
    h264.nalus[h264.nalus_length++] = info;
  }

  // Appends a minimal PPS NAL unit (two bytes) to |data| and records it in
  // the header's NALU list.
  void AddPps(RTPVideoHeader* video_header,
              uint8_t sps_id,
              uint8_t pps_id,
              rtc::CopyOnWriteBuffer* data) {
    NaluInfo info;
    info.type = H264::NaluType::kPps;
    info.sps_id = sps_id;
    info.pps_id = pps_id;
    data->AppendData({H264::NaluType::kPps, pps_id});
    auto& h264 = absl::get<RTPVideoHeaderH264>(video_header->video_type_header);
    h264.nalus[h264.nalus_length++] = info;
  }

  // Records an IDR NAL unit referencing |pps_id| in the header's NALU list.
  // Note: unlike AddSps/AddPps, this does not append payload bytes.
  void AddIdr(RTPVideoHeader* video_header, int pps_id) {
    NaluInfo info;
    info.type = H264::NaluType::kIdr;
    info.sps_id = -1;
    info.pps_id = pps_id;
    auto& h264 = absl::get<RTPVideoHeaderH264>(video_header->video_type_header);
    h264.nalus[h264.nalus_length++] = info;
  }

 protected:
  // Minimal receive-stream config with RED enabled on kRedPayloadType.
  static VideoReceiveStream::Config CreateConfig() {
    VideoReceiveStream::Config config(nullptr);
    config.rtp.remote_ssrc = 1111;
    config.rtp.local_ssrc = 2222;
    config.rtp.red_payload_type = kRedPayloadType;
    return config;
  }

  const webrtc::test::ScopedFieldTrials override_field_trials_;
  VideoReceiveStream::Config config_;
  MockNackSender mock_nack_sender_;
  MockKeyFrameRequestSender mock_key_frame_request_sender_;
  MockTransport mock_transport_;
  MockOnCompleteFrameCallback mock_on_complete_frame_callback_;
  std::unique_ptr<ProcessThread> process_thread_;
  std::unique_ptr<ReceiveStatistics> rtp_receive_statistics_;
  std::unique_ptr<RtpVideoStreamReceiver> rtp_video_stream_receiver_;
};
249
TEST_F(RtpVideoStreamReceiverTest, CacheColorSpaceFromLastPacketOfKeyframe) {
  // Test that color space is cached from the last packet of a key frame and
  // that it's not reset by padding packets without color space.
  constexpr int kVp9PayloadType = 99;
  const ColorSpace kColorSpace(
      ColorSpace::PrimaryID::kFILM, ColorSpace::TransferID::kBT2020_12,
      ColorSpace::MatrixID::kBT2020_NCL, ColorSpace::RangeID::kFull);
  const std::vector<uint8_t> kKeyFramePayload = {0, 1, 2, 3, 4, 5,
                                                 6, 7, 8, 9, 10};
  const std::vector<uint8_t> kDeltaFramePayload = {0, 1, 2, 3, 4};

  // Anonymous helper class that generates received packets.
  class {
   public:
    // Packetizes |payload| as VP9; a small max payload length forces the key
    // frame payload above to be split into two packets.
    void SetPayload(const std::vector<uint8_t>& payload,
                    VideoFrameType video_frame_type) {
      video_frame_type_ = video_frame_type;
      RtpPacketizer::PayloadSizeLimits pay_load_size_limits;
      // Reduce max payload length to make sure the key frame generates two
      // packets.
      pay_load_size_limits.max_payload_len = 8;
      RTPVideoHeaderVP9 rtp_video_header_vp9;
      rtp_video_header_vp9.InitRTPVideoHeaderVP9();
      rtp_video_header_vp9.inter_pic_predicted =
          (video_frame_type == VideoFrameType::kVideoFrameDelta);
      rtp_packetizer_ = std::make_unique<RtpPacketizerVp9>(
          payload, pay_load_size_limits, rtp_video_header_vp9);
    }

    size_t NumPackets() { return rtp_packetizer_->NumPackets(); }
    void SetColorSpace(const ColorSpace& color_space) {
      color_space_ = color_space;
    }

    // Produces the next packet; only the last packet of a key frame gets the
    // color space extension attached.
    RtpPacketReceived NextPacket() {
      RtpHeaderExtensionMap extension_map;
      extension_map.Register<ColorSpaceExtension>(1);
      RtpPacketToSend packet_to_send(&extension_map);
      packet_to_send.SetSequenceNumber(sequence_number_++);
      packet_to_send.SetSsrc(kSsrc);
      packet_to_send.SetPayloadType(kVp9PayloadType);
      bool include_color_space =
          (rtp_packetizer_->NumPackets() == 1u &&
           video_frame_type_ == VideoFrameType::kVideoFrameKey);
      if (include_color_space) {
        EXPECT_TRUE(
            packet_to_send.SetExtension<ColorSpaceExtension>(color_space_));
      }
      rtp_packetizer_->NextPacket(&packet_to_send);

      // Re-parse the outgoing packet as a received one.
      RtpPacketReceived received_packet(&extension_map);
      received_packet.Parse(packet_to_send.data(), packet_to_send.size());
      return received_packet;
    }

   private:
    uint16_t sequence_number_ = 0;
    VideoFrameType video_frame_type_;
    ColorSpace color_space_;
    std::unique_ptr<RtpPacketizer> rtp_packetizer_;
  } received_packet_generator;
  received_packet_generator.SetColorSpace(kColorSpace);

  // Prepare the receiver for VP9.
  VideoCodec codec;
  codec.plType = kVp9PayloadType;
  codec.codecType = kVideoCodecVP9;
  std::map<std::string, std::string> codec_params;
  rtp_video_stream_receiver_->AddReceiveCodec(codec, codec_params,
                                              /*raw_payload=*/false);

  // Generate key frame packets.
  received_packet_generator.SetPayload(kKeyFramePayload,
                                       VideoFrameType::kVideoFrameKey);
  EXPECT_EQ(received_packet_generator.NumPackets(), 2u);
  RtpPacketReceived key_frame_packet1 = received_packet_generator.NextPacket();
  RtpPacketReceived key_frame_packet2 = received_packet_generator.NextPacket();

  // Generate delta frame packet.
  received_packet_generator.SetPayload(kDeltaFramePayload,
                                       VideoFrameType::kVideoFrameDelta);
  EXPECT_EQ(received_packet_generator.NumPackets(), 1u);
  RtpPacketReceived delta_frame_packet = received_packet_generator.NextPacket();

  rtp_video_stream_receiver_->StartReceive();
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kKeyFramePayload.data(), kKeyFramePayload.size());

  // Send the key frame and expect a callback with color space information.
  EXPECT_FALSE(key_frame_packet1.GetExtension<ColorSpaceExtension>());
  EXPECT_TRUE(key_frame_packet2.GetExtension<ColorSpaceExtension>());
  rtp_video_stream_receiver_->OnRtpPacket(key_frame_packet1);
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_))
      .WillOnce(Invoke([kColorSpace](video_coding::EncodedFrame* frame) {
        ASSERT_TRUE(frame->EncodedImage().ColorSpace());
        EXPECT_EQ(*frame->EncodedImage().ColorSpace(), kColorSpace);
      }));
  rtp_video_stream_receiver_->OnRtpPacket(key_frame_packet2);
  // Resend the first key frame packet to simulate padding for example.
  rtp_video_stream_receiver_->OnRtpPacket(key_frame_packet1);

  mock_on_complete_frame_callback_.ClearExpectedBitstream();
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kDeltaFramePayload.data(), kDeltaFramePayload.size());

  // Expect delta frame to have color space set even though color space not
  // included in the RTP packet.
  EXPECT_FALSE(delta_frame_packet.GetExtension<ColorSpaceExtension>());
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_))
      .WillOnce(Invoke([kColorSpace](video_coding::EncodedFrame* frame) {
        ASSERT_TRUE(frame->EncodedImage().ColorSpace());
        EXPECT_EQ(*frame->EncodedImage().ColorSpace(), kColorSpace);
      }));
  rtp_video_stream_receiver_->OnRtpPacket(delta_frame_packet);
}
365
// A single-packet generic key frame should be delivered unmodified as one
// complete frame.
TEST_F(RtpVideoStreamReceiverTest, GenericKeyFrame) {
  rtc::CopyOnWriteBuffer payload({1, 2, 3, 4});
  RtpPacketReceived rtp_packet;
  rtp_packet.SetSequenceNumber(1);
  rtp_packet.SetPayloadType(kPayloadType);
  RTPVideoHeader video_header =
      GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
  mock_on_complete_frame_callback_.AppendExpectedBitstream(payload.data(),
                                                           payload.size());
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
  rtp_video_stream_receiver_->OnReceivedPayloadData(payload, rtp_packet,
                                                    video_header);
}
379
// The absolute capture time RTP extension on an incoming packet must be
// visible in the PacketInfos of the resulting frame.
TEST_F(RtpVideoStreamReceiverTest, PacketInfoIsPropagatedIntoVideoFrames) {
  constexpr uint64_t kAbsoluteCaptureTimestamp = 12;
  constexpr int kId0 = 1;

  RtpHeaderExtensionMap extension_map;
  extension_map.Register<AbsoluteCaptureTimeExtension>(kId0);
  RtpPacketReceived rtp_packet(&extension_map);
  rtp_packet.SetPayloadType(kPayloadType);
  rtc::CopyOnWriteBuffer data({1, 2, 3, 4});
  rtp_packet.SetSequenceNumber(1);
  rtp_packet.SetTimestamp(1);
  rtp_packet.SetSsrc(kSsrc);
  rtp_packet.SetExtension<AbsoluteCaptureTimeExtension>(
      AbsoluteCaptureTime{kAbsoluteCaptureTimestamp,
                          /*estimated_capture_clock_offset=*/absl::nullopt});

  RTPVideoHeader video_header =
      GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
                                                           data.size());
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_))
      .WillOnce(Invoke(
          [kAbsoluteCaptureTimestamp](video_coding::EncodedFrame* frame) {
            EXPECT_THAT(GetAbsoluteCaptureTimestamps(frame),
                        ElementsAre(kAbsoluteCaptureTimestamp));
          }));
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);
}
409
// Only the first packet carries an absolute capture time; the receiver is
// expected to extrapolate one for the frame built from the second packet.
TEST_F(RtpVideoStreamReceiverTest,
       MissingAbsoluteCaptureTimeIsFilledWithExtrapolatedValue) {
  constexpr uint64_t kAbsoluteCaptureTimestamp = 12;
  constexpr int kId0 = 1;

  RtpHeaderExtensionMap extension_map;
  extension_map.Register<AbsoluteCaptureTimeExtension>(kId0);
  RtpPacketReceived rtp_packet(&extension_map);
  rtp_packet.SetPayloadType(kPayloadType);

  rtc::CopyOnWriteBuffer data({1, 2, 3, 4});
  uint16_t sequence_number = 1;
  uint32_t rtp_timestamp = 1;
  rtp_packet.SetSequenceNumber(sequence_number);
  rtp_packet.SetTimestamp(rtp_timestamp);
  rtp_packet.SetSsrc(kSsrc);
  rtp_packet.SetExtension<AbsoluteCaptureTimeExtension>(
      AbsoluteCaptureTime{kAbsoluteCaptureTimestamp,
                          /*estimated_capture_clock_offset=*/absl::nullopt});

  RTPVideoHeader video_header =
      GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
                                                           data.size());
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);

  // Rtp packet without absolute capture time.
  rtp_packet = RtpPacketReceived(&extension_map);
  rtp_packet.SetPayloadType(kPayloadType);
  rtp_packet.SetSequenceNumber(++sequence_number);
  rtp_packet.SetTimestamp(++rtp_timestamp);
  rtp_packet.SetSsrc(kSsrc);

  // There is no absolute capture time in the second packet.
  // Expect rtp video stream receiver to extrapolate it for the resulting video
  // frame using absolute capture time from the previous packet.
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_))
      .WillOnce(Invoke([](video_coding::EncodedFrame* frame) {
        EXPECT_THAT(GetAbsoluteCaptureTimestamps(frame), SizeIs(1));
      }));
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);
}
455
// A RED packet that encapsulates another RED payload must not make the
// receiver recurse into itself; the test passes if OnRtpPacket() returns.
TEST_F(RtpVideoStreamReceiverTest, NoInfiniteRecursionOnEncapsulatedRedPacket) {
  const std::vector<uint8_t> data({
      0x80,              // RTP version.
      kRedPayloadType,   // Payload type.
      0, 0, 0, 0, 0, 0,  // Don't care.
      0, 0, 0x4, 0x57,   // SSRC
      kRedPayloadType,   // RED header.
      0, 0, 0, 0, 0      // Don't care.
  });
  RtpPacketReceived packet;
  EXPECT_TRUE(packet.Parse(data.data(), data.size()));
  rtp_video_stream_receiver_->StartReceive();
  rtp_video_stream_receiver_->OnRtpPacket(packet);
}
470
// A RED packet with no payload at all must be dropped without reading past
// the end of the buffer (exercised under ASan).
TEST_F(RtpVideoStreamReceiverTest,
       DropsPacketWithRedPayloadTypeAndEmptyPayload) {
  // Shadows the file-scope kRedPayloadType with the same value (125).
  const uint8_t kRedPayloadType = 125;
  config_.rtp.red_payload_type = kRedPayloadType;
  // NOTE(review): the fixture does not override SetUp(), so this invokes the
  // gtest base no-op rather than rebuilding the receiver as the comment
  // claims — verify against the fixture's intended lifecycle.
  SetUp();  // re-create rtp_video_stream_receiver with red payload type.
  // clang-format off
  const uint8_t data[] = {
      0x80,              // RTP version.
      kRedPayloadType,   // Payload type.
      0, 0, 0, 0, 0, 0,  // Don't care.
      0, 0, 0x4, 0x57,   // SSRC
      // Empty rtp payload.
  };
  // clang-format on
  RtpPacketReceived packet;
  // Manually convert to CopyOnWriteBuffer to be sure capacity == size
  // and asan bot can catch read buffer overflow.
  EXPECT_TRUE(packet.Parse(rtc::CopyOnWriteBuffer(data)));
  rtp_video_stream_receiver_->StartReceive();
  rtp_video_stream_receiver_->OnRtpPacket(packet);
  // Expect asan doesn't find anything.
}
493
// When the delivered payload differs from the expected bitstream, the frame
// must be reported through the bitstream-failure path.
TEST_F(RtpVideoStreamReceiverTest, GenericKeyFrameBitstreamError) {
  rtc::CopyOnWriteBuffer payload({1, 2, 3, 4});
  RtpPacketReceived rtp_packet;
  rtp_packet.SetSequenceNumber(1);
  rtp_packet.SetPayloadType(kPayloadType);
  RTPVideoHeader video_header =
      GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
  // Deliberately differs from |payload| in the last byte.
  constexpr uint8_t kExpectedBitstream[] = {1, 2, 3, 0xff};
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kExpectedBitstream, sizeof(kExpectedBitstream));
  EXPECT_CALL(mock_on_complete_frame_callback_,
              DoOnCompleteFrameFailBitstream(_));
  rtp_video_stream_receiver_->OnReceivedPayloadData(payload, rtp_packet,
                                                    video_header);
}
509
// Parameterized H264 fixture; the parameter is a field-trial string that is
// forwarded to the base fixture's constructor.
class RtpVideoStreamReceiverTestH264
    : public RtpVideoStreamReceiverTest,
      public ::testing::WithParamInterface<std::string> {
 protected:
  RtpVideoStreamReceiverTestH264() : RtpVideoStreamReceiverTest(GetParam()) {}
};
516
// Run the H264 tests both with and without the SpsPpsIdrIsH264Keyframe field
// trial enabled.
INSTANTIATE_TEST_SUITE_P(SpsPpsIdrIsKeyframe,
                         RtpVideoStreamReceiverTestH264,
                         Values("", "WebRTC-SpsPpsIdrIsH264Keyframe/Enabled/"));
520
521 // Fails on MSAN: https://bugs.chromium.org/p/webrtc/issues/detail?id=11376.
522 #if defined(MEMORY_SANITIZER)
523 #define MAYBE_InBandSpsPps DISABLED_InBandSpsPps
524 #else
525 #define MAYBE_InBandSpsPps InBandSpsPps
526 #endif
// Feeds SPS, PPS and an IDR NAL unit in-band across three packets and
// expects one complete key frame whose bitstream has an Annex B start code
// prepended to each NAL unit.
TEST_P(RtpVideoStreamReceiverTestH264, MAYBE_InBandSpsPps) {
  rtc::CopyOnWriteBuffer sps_data;
  RtpPacketReceived rtp_packet;
  RTPVideoHeader sps_video_header = GetDefaultH264VideoHeader();
  AddSps(&sps_video_header, 0, &sps_data);
  rtp_packet.SetSequenceNumber(0);
  rtp_packet.SetPayloadType(kPayloadType);
  sps_video_header.is_first_packet_in_frame = true;
  sps_video_header.frame_type = VideoFrameType::kEmptyFrame;
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(sps_data.data(),
                                                           sps_data.size());
  rtp_video_stream_receiver_->OnReceivedPayloadData(sps_data, rtp_packet,
                                                    sps_video_header);

  rtc::CopyOnWriteBuffer pps_data;
  RTPVideoHeader pps_video_header = GetDefaultH264VideoHeader();
  AddPps(&pps_video_header, 0, 1, &pps_data);
  rtp_packet.SetSequenceNumber(1);
  pps_video_header.is_first_packet_in_frame = true;
  pps_video_header.frame_type = VideoFrameType::kEmptyFrame;
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(pps_data.data(),
                                                           pps_data.size());
  rtp_video_stream_receiver_->OnReceivedPayloadData(pps_data, rtp_packet,
                                                    pps_video_header);

  rtc::CopyOnWriteBuffer idr_data;
  RTPVideoHeader idr_video_header = GetDefaultH264VideoHeader();
  AddIdr(&idr_video_header, 1);
  rtp_packet.SetSequenceNumber(2);
  idr_video_header.is_first_packet_in_frame = true;
  idr_video_header.is_last_packet_in_frame = true;
  idr_video_header.frame_type = VideoFrameType::kVideoFrameKey;
  // 0x65 is an IDR slice NAL unit header byte.
  const uint8_t idr[] = {0x65, 1, 2, 3};
  idr_data.AppendData(idr);
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(idr_data.data(),
                                                           idr_data.size());
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
  rtp_video_stream_receiver_->OnReceivedPayloadData(idr_data, rtp_packet,
                                                    idr_video_header);
}
573
// SPS/PPS provided out-of-band via the sprop-parameter-sets fmtp attribute
// must be inserted (with start codes) in front of the first IDR frame.
TEST_P(RtpVideoStreamReceiverTestH264, OutOfBandFmtpSpsPps) {
  // Local payload type (99) intentionally shadows the file-scope constant
  // (100); a separate codec is registered for it below.
  constexpr int kPayloadType = 99;
  VideoCodec codec;
  codec.plType = kPayloadType;
  std::map<std::string, std::string> codec_params;
  // Example parameter sets from https://tools.ietf.org/html/rfc3984#section-8.2
  // .
  codec_params.insert(
      {cricket::kH264FmtpSpropParameterSets, "Z0IACpZTBYmI,aMljiA=="});
  rtp_video_stream_receiver_->AddReceiveCodec(codec, codec_params,
                                              /*raw_payload=*/false);
  // Decoded form of the base64 SPS ("Z0IACpZTBYmI") above.
  const uint8_t binary_sps[] = {0x67, 0x42, 0x00, 0x0a, 0x96,
                                0x53, 0x05, 0x89, 0x88};
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(binary_sps,
                                                           sizeof(binary_sps));
  // Decoded form of the base64 PPS ("aMljiA==") above.
  const uint8_t binary_pps[] = {0x68, 0xc9, 0x63, 0x88};
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(binary_pps,
                                                           sizeof(binary_pps));

  RtpPacketReceived rtp_packet;
  RTPVideoHeader video_header = GetDefaultH264VideoHeader();
  AddIdr(&video_header, 0);
  rtp_packet.SetPayloadType(kPayloadType);
  rtp_packet.SetSequenceNumber(2);
  video_header.is_first_packet_in_frame = true;
  video_header.is_last_packet_in_frame = true;
  video_header.codec = kVideoCodecH264;
  video_header.frame_type = VideoFrameType::kVideoFrameKey;
  rtc::CopyOnWriteBuffer data({1, 2, 3});
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
                                                           data.size());
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);
}
615
// Empty payloads (padding sent inside the media stream) must not stall frame
// delivery, including when a padding packet fills a sequence-number gap.
TEST_F(RtpVideoStreamReceiverTest, PaddingInMediaStream) {
  RtpPacketReceived rtp_packet;
  RTPVideoHeader video_header = GetDefaultH264VideoHeader();
  rtc::CopyOnWriteBuffer data({1, 2, 3});
  rtp_packet.SetPayloadType(kPayloadType);
  rtp_packet.SetSequenceNumber(2);
  video_header.is_first_packet_in_frame = true;
  video_header.is_last_packet_in_frame = true;
  video_header.codec = kVideoCodecGeneric;
  video_header.frame_type = VideoFrameType::kVideoFrameKey;
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
                                                           data.size());

  // Key frame in packet 2 completes immediately.
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);

  // Packet 3 has an empty payload (padding); no frame expected.
  rtp_packet.SetSequenceNumber(3);
  rtp_video_stream_receiver_->OnReceivedPayloadData({}, rtp_packet,
                                                    video_header);

  rtp_packet.SetSequenceNumber(4);
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
  video_header.frame_type = VideoFrameType::kVideoFrameDelta;
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);

  // Packet 6 leaves a gap at sequence number 5; its frame cannot complete
  // yet, so no callback is expected here.
  rtp_packet.SetSequenceNumber(6);
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);

  // The padding packet at sequence number 5 fills the gap, which lets the
  // buffered frame complete.
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
  rtp_packet.SetSequenceNumber(5);
  rtp_video_stream_receiver_->OnReceivedPayloadData({}, rtp_packet,
                                                    video_header);
}
652
// A delta frame arriving before any key frame should make the receiver ask
// for a key frame.
TEST_F(RtpVideoStreamReceiverTest, RequestKeyframeIfFirstFrameIsDelta) {
  rtc::CopyOnWriteBuffer payload({1, 2, 3, 4});
  RtpPacketReceived rtp_packet;
  rtp_packet.SetSequenceNumber(1);
  rtp_packet.SetPayloadType(kPayloadType);
  RTPVideoHeader video_header =
      GetGenericVideoHeader(VideoFrameType::kVideoFrameDelta);
  EXPECT_CALL(mock_key_frame_request_sender_, RequestKeyFrame());
  rtp_video_stream_receiver_->OnReceivedPayloadData(payload, rtp_packet,
                                                    video_header);
}
664
// Filling the packet buffer with incomplete frames (every packet leaves a
// sequence-number gap) must eventually trigger a keyframe request.
TEST_F(RtpVideoStreamReceiverTest, RequestKeyframeWhenPacketBufferGetsFull) {
  constexpr int kPacketBufferMaxSize = 2048;

  RtpPacketReceived rtp_packet;
  rtp_packet.SetPayloadType(kPayloadType);
  rtc::CopyOnWriteBuffer data({1, 2, 3, 4});
  RTPVideoHeader video_header =
      GetGenericVideoHeader(VideoFrameType::kVideoFrameDelta);
  // Incomplete frames so that the packet buffer is filling up.
  video_header.is_last_packet_in_frame = false;
  uint16_t start_sequence_number = 1234;
  rtp_packet.SetSequenceNumber(start_sequence_number);
  // Stepping the sequence number by 2 leaves a gap after every packet, so no
  // frame ever completes and the buffer keeps growing.
  while (rtp_packet.SequenceNumber() - start_sequence_number <
         kPacketBufferMaxSize) {
    rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                      video_header);
    rtp_packet.SetSequenceNumber(rtp_packet.SequenceNumber() + 2);
  }

  EXPECT_CALL(mock_key_frame_request_sender_, RequestKeyFrame());
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);
}
688
// Every registered secondary sink must be notified about each received
// packet while the stream is started.
TEST_F(RtpVideoStreamReceiverTest, SecondarySinksGetRtpNotifications) {
  rtp_video_stream_receiver_->StartReceive();

  MockRtpPacketSink first_sink;
  MockRtpPacketSink second_sink;
  rtp_video_stream_receiver_->AddSecondarySink(&first_sink);
  rtp_video_stream_receiver_->AddSecondarySink(&second_sink);

  auto packet = CreateRtpPacketReceived();
  EXPECT_CALL(first_sink, OnRtpPacket(SamePacketAs(*packet)));
  EXPECT_CALL(second_sink, OnRtpPacket(SamePacketAs(*packet)));
  rtp_video_stream_receiver_->OnRtpPacket(*packet);

  // Test tear-down.
  rtp_video_stream_receiver_->StopReceive();
  rtp_video_stream_receiver_->RemoveSecondarySink(&first_sink);
  rtp_video_stream_receiver_->RemoveSecondarySink(&second_sink);
}
709
// A sink that has been removed must not be notified about incoming packets.
TEST_F(RtpVideoStreamReceiverTest, RemovedSecondarySinksGetNoRtpNotifications) {
  rtp_video_stream_receiver_->StartReceive();

  MockRtpPacketSink secondary_sink;

  rtp_video_stream_receiver_->AddSecondarySink(&secondary_sink);
  rtp_video_stream_receiver_->RemoveSecondarySink(&secondary_sink);

  auto rtp_packet = CreateRtpPacketReceived();

  EXPECT_CALL(secondary_sink, OnRtpPacket(_)).Times(0);

  rtp_video_stream_receiver_->OnRtpPacket(*rtp_packet);

  // Test tear-down.
  rtp_video_stream_receiver_->StopReceive();
}
727
// Removing one sink must not affect delivery to the sinks that remain.
TEST_F(RtpVideoStreamReceiverTest,
       OnlyRemovedSecondarySinksExcludedFromNotifications) {
  rtp_video_stream_receiver_->StartReceive();

  MockRtpPacketSink kept_secondary_sink;
  MockRtpPacketSink removed_secondary_sink;

  rtp_video_stream_receiver_->AddSecondarySink(&kept_secondary_sink);
  rtp_video_stream_receiver_->AddSecondarySink(&removed_secondary_sink);
  rtp_video_stream_receiver_->RemoveSecondarySink(&removed_secondary_sink);

  auto rtp_packet = CreateRtpPacketReceived();
  EXPECT_CALL(kept_secondary_sink, OnRtpPacket(SamePacketAs(*rtp_packet)));

  rtp_video_stream_receiver_->OnRtpPacket(*rtp_packet);

  // Test tear-down.
  rtp_video_stream_receiver_->StopReceive();
  rtp_video_stream_receiver_->RemoveSecondarySink(&kept_secondary_sink);
}
748
// Secondary sinks get no notifications while the stream is stopped.
TEST_F(RtpVideoStreamReceiverTest,
       SecondariesOfNonStartedStreamGetNoNotifications) {
  // Explicitly showing that the stream is not in the |started| state,
  // regardless of whether streams start out |started| or |stopped|.
  rtp_video_stream_receiver_->StopReceive();

  MockRtpPacketSink secondary_sink;
  rtp_video_stream_receiver_->AddSecondarySink(&secondary_sink);

  auto rtp_packet = CreateRtpPacketReceived();
  EXPECT_CALL(secondary_sink, OnRtpPacket(_)).Times(0);

  rtp_video_stream_receiver_->OnRtpPacket(*rtp_packet);

  // Test tear-down.
  rtp_video_stream_receiver_->RemoveSecondarySink(&secondary_sink);
}
766
// Verifies that a single-packet frame carrying the generic frame descriptor
// (version 00) extension is delivered as one complete frame whose references
// and spatial index are decoded from the descriptor.
TEST_F(RtpVideoStreamReceiverTest, ParseGenericDescriptorOnePacket) {
  const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
  const int kSpatialIndex = 1;

  rtp_video_stream_receiver_->StartReceive();

  RtpHeaderExtensionMap extension_map;
  extension_map.Register<RtpGenericFrameDescriptorExtension00>(5);
  RtpPacketReceived rtp_packet(&extension_map);

  // Descriptor for a frame fully contained in one packet, with frame id 100
  // and two dependencies (on frames 100-90=10 and 100-80=20).
  RtpGenericFrameDescriptor generic_descriptor;
  generic_descriptor.SetFirstPacketInSubFrame(true);
  generic_descriptor.SetLastPacketInSubFrame(true);
  generic_descriptor.SetFrameId(100);
  generic_descriptor.SetSpatialLayersBitmask(1 << kSpatialIndex);
  generic_descriptor.AddFrameDependencyDiff(90);
  generic_descriptor.AddFrameDependencyDiff(80);
  ASSERT_TRUE(rtp_packet.SetExtension<RtpGenericFrameDescriptorExtension00>(
      generic_descriptor));

  uint8_t* payload = rtp_packet.SetPayloadSize(data.size());
  memcpy(payload, data.data(), data.size());
  // The first byte is the header, so we ignore the first byte of |data|.
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data() + 1,
                                                           data.size() - 1);

  rtp_packet.SetMarker(true);
  // Fix: the payload type was previously set twice; setting it once here is
  // sufficient.
  rtp_packet.SetPayloadType(kPayloadType);
  rtp_packet.SetSequenceNumber(1);

  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce(Invoke([kSpatialIndex](video_coding::EncodedFrame* frame) {
        EXPECT_EQ(frame->num_references, 2U);
        EXPECT_EQ(frame->references[0], frame->id.picture_id - 90);
        EXPECT_EQ(frame->references[1], frame->id.picture_id - 80);
        EXPECT_EQ(frame->id.spatial_layer, kSpatialIndex);
        EXPECT_THAT(frame->PacketInfos(), SizeIs(1));
      }));

  rtp_video_stream_receiver_->OnRtpPacket(rtp_packet);
}
809
// Verifies that a frame split across two RTP packets (flagged via the generic
// frame descriptor's first/last-packet-in-sub-frame bits) is reassembled into
// a single complete frame carrying the spatial index and resolution signaled
// in the first packet's descriptor.
TEST_F(RtpVideoStreamReceiverTest, ParseGenericDescriptorTwoPackets) {
  const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
  const int kSpatialIndex = 1;

  rtp_video_stream_receiver_->StartReceive();

  RtpHeaderExtensionMap extension_map;
  extension_map.Register<RtpGenericFrameDescriptorExtension00>(5);
  RtpPacketReceived first_packet(&extension_map);

  // First packet of the frame: carries the full descriptor (frame id, spatial
  // layer bitmask, resolution) but is not the last packet.
  RtpGenericFrameDescriptor first_packet_descriptor;
  first_packet_descriptor.SetFirstPacketInSubFrame(true);
  first_packet_descriptor.SetLastPacketInSubFrame(false);
  first_packet_descriptor.SetFrameId(100);
  first_packet_descriptor.SetSpatialLayersBitmask(1 << kSpatialIndex);
  first_packet_descriptor.SetResolution(480, 360);
  ASSERT_TRUE(first_packet.SetExtension<RtpGenericFrameDescriptorExtension00>(
      first_packet_descriptor));

  uint8_t* first_packet_payload = first_packet.SetPayloadSize(data.size());
  memcpy(first_packet_payload, data.data(), data.size());
  // The first byte is the header, so we ignore the first byte of |data|.
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data() + 1,
                                                           data.size() - 1);

  first_packet.SetPayloadType(kPayloadType);
  first_packet.SetSequenceNumber(1);
  rtp_video_stream_receiver_->OnRtpPacket(first_packet);

  // Second (and last) packet of the same frame; the marker bit closes it.
  RtpPacketReceived second_packet(&extension_map);
  RtpGenericFrameDescriptor second_packet_descriptor;
  second_packet_descriptor.SetFirstPacketInSubFrame(false);
  second_packet_descriptor.SetLastPacketInSubFrame(true);
  ASSERT_TRUE(second_packet.SetExtension<RtpGenericFrameDescriptorExtension00>(
      second_packet_descriptor));

  second_packet.SetMarker(true);
  second_packet.SetPayloadType(kPayloadType);
  second_packet.SetSequenceNumber(2);

  uint8_t* second_packet_payload = second_packet.SetPayloadSize(data.size());
  memcpy(second_packet_payload, data.data(), data.size());
  // The first byte is the header, so we ignore the first byte of |data|.
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data() + 1,
                                                           data.size() - 1);

  // The reassembled frame should have no references, carry the spatial index
  // and resolution from the first packet's descriptor, and record packet
  // infos for both constituent packets.
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce(Invoke([kSpatialIndex](video_coding::EncodedFrame* frame) {
        EXPECT_EQ(frame->num_references, 0U);
        EXPECT_EQ(frame->id.spatial_layer, kSpatialIndex);
        EXPECT_EQ(frame->EncodedImage()._encodedWidth, 480u);
        EXPECT_EQ(frame->EncodedImage()._encodedHeight, 360u);
        EXPECT_THAT(frame->PacketInfos(), SizeIs(2));
      }));

  rtp_video_stream_receiver_->OnRtpPacket(second_packet);
}
867
// A payload type registered with raw_payload=true must be passed through
// without depacketization: the entire packet payload becomes the bitstream.
TEST_F(RtpVideoStreamReceiverTest, ParseGenericDescriptorRawPayload) {
  const std::vector<uint8_t> kPacketPayload = {0, 1, 2, 3, 4};
  const int kRawPayloadType = 123;

  VideoCodec codec;
  codec.plType = kRawPayloadType;
  rtp_video_stream_receiver_->AddReceiveCodec(codec, {}, /*raw_payload=*/true);
  rtp_video_stream_receiver_->StartReceive();

  RtpHeaderExtensionMap extensions;
  extensions.Register<RtpGenericFrameDescriptorExtension00>(5);
  RtpPacketReceived packet(&extensions);
  packet.SetMarker(true);
  packet.SetPayloadType(kRawPayloadType);
  packet.SetSequenceNumber(1);

  // The descriptor marks this packet as a complete (sub)frame on its own.
  RtpGenericFrameDescriptor descriptor;
  descriptor.SetFirstPacketInSubFrame(true);
  descriptor.SetLastPacketInSubFrame(true);
  ASSERT_TRUE(
      packet.SetExtension<RtpGenericFrameDescriptorExtension00>(descriptor));

  uint8_t* payload = packet.SetPayloadSize(kPacketPayload.size());
  memcpy(payload, kPacketPayload.data(), kPacketPayload.size());
  // Raw payload: no header byte is stripped, the whole payload is expected.
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kPacketPayload.data(), kPacketPayload.size());

  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame);
  rtp_video_stream_receiver_->OnRtpPacket(packet);
}
899
// Frame ids in the generic frame descriptor are 16 bits and wrap around; the
// receiver must unwrap them into a monotonically increasing picture id.
TEST_F(RtpVideoStreamReceiverTest, UnwrapsFrameId) {
  const std::vector<uint8_t> kFramePayload = {0, 1, 2, 3, 4};
  const int kRawPayloadType = 123;

  VideoCodec codec;
  codec.plType = kRawPayloadType;
  rtp_video_stream_receiver_->AddReceiveCodec(codec, {}, /*raw_payload=*/true);
  rtp_video_stream_receiver_->StartReceive();
  RtpHeaderExtensionMap extensions;
  extensions.Register<RtpGenericFrameDescriptorExtension00>(5);

  uint16_t sequence_number = 1;
  // Sends one single-packet frame whose descriptor carries
  // |wrapped_frame_id|.
  auto inject_packet = [&](uint16_t wrapped_frame_id) {
    RtpPacketReceived packet(&extensions);

    RtpGenericFrameDescriptor descriptor;
    descriptor.SetFirstPacketInSubFrame(true);
    descriptor.SetLastPacketInSubFrame(true);
    descriptor.SetFrameId(wrapped_frame_id);
    ASSERT_TRUE(
        packet.SetExtension<RtpGenericFrameDescriptorExtension00>(descriptor));

    uint8_t* payload = packet.SetPayloadSize(kFramePayload.size());
    ASSERT_TRUE(payload);
    memcpy(payload, kFramePayload.data(), kFramePayload.size());
    mock_on_complete_frame_callback_.ClearExpectedBitstream();
    mock_on_complete_frame_callback_.AppendExpectedBitstream(
        kFramePayload.data(), kFramePayload.size());
    packet.SetMarker(true);
    packet.SetPayloadType(kRawPayloadType);
    packet.SetSequenceNumber(++sequence_number);
    rtp_video_stream_receiver_->OnRtpPacket(packet);
  };

  int64_t first_picture_id;
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce([&](video_coding::EncodedFrame* frame) {
        first_picture_id = frame->id.picture_id;
      });
  inject_packet(/*wrapped_frame_id=*/0xffff);

  // 0x0002 is three frames after 0xffff once unwrapped across the 16-bit
  // boundary.
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce([&](video_coding::EncodedFrame* frame) {
        EXPECT_EQ(frame->id.picture_id - first_picture_id, 3);
      });
  inject_packet(/*wrapped_frame_id=*/0x0002);
}
947
// Fixture for tests exercising the DependencyDescriptor RTP header extension.
// Registers a raw-payload receive codec, registers the extension, and starts
// the receiver.
class RtpVideoStreamReceiverDependencyDescriptorTest
    : public RtpVideoStreamReceiverTest {
 public:
  RtpVideoStreamReceiverDependencyDescriptorTest() {
    VideoCodec codec;
    codec.plType = payload_type_;
    rtp_video_stream_receiver_->AddReceiveCodec(codec, {},
                                                /*raw_payload=*/true);
    extension_map_.Register<RtpDependencyDescriptorExtension>(7);
    rtp_video_stream_receiver_->StartReceive();
  }

  // Returns some valid structure for the DependencyDescriptors.
  // First template of that structure always fit for a key frame.
  static FrameDependencyStructure CreateStreamStructure() {
    FrameDependencyStructure stream_structure;
    stream_structure.num_decode_targets = 1;
    stream_structure.templates = {
        FrameDependencyTemplate().Dtis("S"),
        FrameDependencyTemplate().Dtis("S").FrameDiffs({1}),
    };
    return stream_structure;
  }

  // Builds a single-packet frame carrying |dependency_descriptor| (serialized
  // against |stream_structure|) and feeds it to the receiver.
  void InjectPacketWith(const FrameDependencyStructure& stream_structure,
                        const DependencyDescriptor& dependency_descriptor) {
    const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
    RtpPacketReceived rtp_packet(&extension_map_);
    ASSERT_TRUE(rtp_packet.SetExtension<RtpDependencyDescriptorExtension>(
        stream_structure, dependency_descriptor));
    uint8_t* payload = rtp_packet.SetPayloadSize(data.size());
    ASSERT_TRUE(payload);
    memcpy(payload, data.data(), data.size());
    mock_on_complete_frame_callback_.ClearExpectedBitstream();
    mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
                                                             data.size());
    rtp_packet.SetMarker(true);
    rtp_packet.SetPayloadType(payload_type_);
    rtp_packet.SetSequenceNumber(++rtp_sequence_number_);
    rtp_video_stream_receiver_->OnRtpPacket(rtp_packet);
  }

 private:
  const int payload_type_ = 123;
  RtpHeaderExtensionMap extension_map_;
  // Sequence number of the last injected packet; incremented per packet.
  uint16_t rtp_sequence_number_ = 321;
};
995
// The DependencyDescriptor's frame number is 16 bits and wraps around; the
// receiver must unwrap it correctly even when delta frames arrive reordered.
TEST_F(RtpVideoStreamReceiverDependencyDescriptorTest, UnwrapsFrameId) {
  FrameDependencyStructure stream_structure = CreateStreamStructure();

  DependencyDescriptor keyframe_descriptor;
  keyframe_descriptor.attached_structure =
      std::make_unique<FrameDependencyStructure>(stream_structure);
  keyframe_descriptor.frame_dependencies = stream_structure.templates[0];
  keyframe_descriptor.frame_number = 0xfff0;
  // DependencyDescriptor doesn't support reordering delta frame before
  // keyframe. Thus feed a key frame first, then test reordered delta frames.
  int64_t first_picture_id;
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce([&](video_coding::EncodedFrame* frame) {
        first_picture_id = frame->id.picture_id;
      });
  InjectPacketWith(stream_structure, keyframe_descriptor);

  DependencyDescriptor deltaframe1_descriptor;
  deltaframe1_descriptor.frame_dependencies = stream_structure.templates[1];
  deltaframe1_descriptor.frame_number = 0xfffe;

  DependencyDescriptor deltaframe2_descriptor;
  // Bug fix: this line previously assigned to |deltaframe1_descriptor| again,
  // leaving |deltaframe2_descriptor.frame_dependencies| default-initialized.
  deltaframe2_descriptor.frame_dependencies = stream_structure.templates[1];
  deltaframe2_descriptor.frame_number = 0x0002;

  // Parser should unwrap frame ids correctly even if packets were reordered by
  // the network.
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce([&](video_coding::EncodedFrame* frame) {
        // 0x0002 - 0xfff0
        EXPECT_EQ(frame->id.picture_id - first_picture_id, 18);
      })
      .WillOnce([&](video_coding::EncodedFrame* frame) {
        // 0xfffe - 0xfff0
        EXPECT_EQ(frame->id.picture_id - first_picture_id, 14);
      });
  InjectPacketWith(stream_structure, deltaframe2_descriptor);
  InjectPacketWith(stream_structure, deltaframe1_descriptor);
}
1035
// A delta frame that arrives after a newer key frame has replaced the frame
// dependency structure it was encoded against must be dropped rather than
// decoded against the wrong structure.
TEST_F(RtpVideoStreamReceiverDependencyDescriptorTest,
       DropsLateDeltaFramePacketWithDependencyDescriptorExtension) {
  FrameDependencyStructure stream_structure1 = CreateStreamStructure();
  FrameDependencyStructure stream_structure2 = CreateStreamStructure();
  // Make sure template ids for these two structures do not collide:
  // adjust structure_id (that is also used as template id offset).
  stream_structure1.structure_id = 13;
  stream_structure2.structure_id =
      stream_structure1.structure_id + stream_structure1.templates.size();

  // 1st key frame, attaching the 1st structure.
  DependencyDescriptor keyframe1_descriptor;
  keyframe1_descriptor.attached_structure =
      std::make_unique<FrameDependencyStructure>(stream_structure1);
  keyframe1_descriptor.frame_dependencies = stream_structure1.templates[0];
  keyframe1_descriptor.frame_number = 1;
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame);
  InjectPacketWith(stream_structure1, keyframe1_descriptor);

  // Pass in 2nd key frame with different structure.
  DependencyDescriptor keyframe2_descriptor;
  keyframe2_descriptor.attached_structure =
      std::make_unique<FrameDependencyStructure>(stream_structure2);
  keyframe2_descriptor.frame_dependencies = stream_structure2.templates[0];
  keyframe2_descriptor.frame_number = 3;
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame);
  InjectPacketWith(stream_structure2, keyframe2_descriptor);

  // Pass in late delta frame that uses structure of the 1st key frame.
  // It references the now-obsolete structure, so no complete frame should be
  // delivered for it.
  DependencyDescriptor deltaframe_descriptor;
  deltaframe_descriptor.frame_dependencies = stream_structure1.templates[0];
  deltaframe_descriptor.frame_number = 2;
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame).Times(0);
  InjectPacketWith(stream_structure1, deltaframe_descriptor);
}
1070
// A key frame that arrives after a newer key frame (carrying a newer
// dependency structure) must be dropped, and must not block delivery of later
// delta frames that use the newest structure.
TEST_F(RtpVideoStreamReceiverDependencyDescriptorTest,
       DropsLateKeyFramePacketWithDependencyDescriptorExtension) {
  FrameDependencyStructure stream_structure1 = CreateStreamStructure();
  FrameDependencyStructure stream_structure2 = CreateStreamStructure();
  // Make sure template ids for these two structures do not collide:
  // adjust structure_id (that is also used as template id offset).
  stream_structure1.structure_id = 13;
  stream_structure2.structure_id =
      stream_structure1.structure_id + stream_structure1.templates.size();

  // "Old" key frame (frame number 1) attaching the 1st structure.
  DependencyDescriptor keyframe1_descriptor;
  keyframe1_descriptor.attached_structure =
      std::make_unique<FrameDependencyStructure>(stream_structure1);
  keyframe1_descriptor.frame_dependencies = stream_structure1.templates[0];
  keyframe1_descriptor.frame_number = 1;

  // "New" key frame (frame number 3) attaching the 2nd structure.
  DependencyDescriptor keyframe2_descriptor;
  keyframe2_descriptor.attached_structure =
      std::make_unique<FrameDependencyStructure>(stream_structure2);
  keyframe2_descriptor.frame_dependencies = stream_structure2.templates[0];
  keyframe2_descriptor.frame_number = 3;

  // Inject the new key frame first, then the stale one; only the new key
  // frame (frame number 3) may be delivered.
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce([&](video_coding::EncodedFrame* frame) {
        EXPECT_EQ(frame->id.picture_id & 0xFFFF, 3);
      });
  InjectPacketWith(stream_structure2, keyframe2_descriptor);
  InjectPacketWith(stream_structure1, keyframe1_descriptor);

  // Pass in delta frame that uses structure of the 2nd key frame. Late key
  // frame shouldn't block it.
  DependencyDescriptor deltaframe_descriptor;
  deltaframe_descriptor.frame_dependencies = stream_structure2.templates[0];
  deltaframe_descriptor.frame_number = 4;
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce([&](video_coding::EncodedFrame* frame) {
        EXPECT_EQ(frame->id.picture_id & 0xFFFF, 4);
      });
  InjectPacketWith(stream_structure2, deltaframe_descriptor);
}
1111
1112 #if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
1113 using RtpVideoStreamReceiverDeathTest = RtpVideoStreamReceiverTest;
// Registering the same secondary sink twice is a programming error and must
// trip a DCHECK.
TEST_F(RtpVideoStreamReceiverDeathTest, RepeatedSecondarySinkDisallowed) {
  MockRtpPacketSink sink;

  rtp_video_stream_receiver_->AddSecondarySink(&sink);
  EXPECT_DEATH(rtp_video_stream_receiver_->AddSecondarySink(&sink), "");

  // Undo the one successful registration before the fixture tears down.
  rtp_video_stream_receiver_->RemoveSecondarySink(&sink);
}
1124 #endif
1125
// Verifies the frame-transformer lifecycle: the receiver registers a sink
// callback on construction, routes received frames through Transform(), and
// unregisters the callback on destruction.
TEST_F(RtpVideoStreamReceiverTest, TransformFrame) {
  rtc::scoped_refptr<MockFrameTransformer> mock_frame_transformer =
      new rtc::RefCountedObject<testing::NiceMock<MockFrameTransformer>>();
  // Registration must happen during receiver construction.
  EXPECT_CALL(*mock_frame_transformer,
              RegisterTransformedFrameSinkCallback(_, config_.rtp.remote_ssrc));
  auto receiver = std::make_unique<RtpVideoStreamReceiver>(
      Clock::GetRealTimeClock(), &mock_transport_, nullptr, nullptr, &config_,
      rtp_receive_statistics_.get(), nullptr, nullptr, process_thread_.get(),
      &mock_nack_sender_, nullptr, &mock_on_complete_frame_callback_, nullptr,
      mock_frame_transformer);
  VideoCodec video_codec;
  video_codec.plType = kPayloadType;
  video_codec.codecType = kVideoCodecGeneric;
  receiver->AddReceiveCodec(video_codec, {}, /*raw_payload=*/false);

  RtpPacketReceived rtp_packet;
  rtp_packet.SetPayloadType(kPayloadType);
  rtc::CopyOnWriteBuffer data({1, 2, 3, 4});
  rtp_packet.SetSequenceNumber(1);
  RTPVideoHeader video_header =
      GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
                                                           data.size());
  // The complete frame must be handed to the transformer.
  EXPECT_CALL(*mock_frame_transformer, Transform(_));
  receiver->OnReceivedPayloadData(data, rtp_packet, video_header);

  // Destroying the receiver must unregister the callback.
  EXPECT_CALL(*mock_frame_transformer,
              UnregisterTransformedFrameSinkCallback(config_.rtp.remote_ssrc));
  receiver = nullptr;
}
1156
// Test default behavior and when playout delay is overridden by field trial.
// Delay (min/max, in ms) signaled in the RTP playout-delay header extension.
const PlayoutDelay kTransmittedPlayoutDelay = {100, 200};
// Delay enforced by the WebRTC-ForcePlayoutDelay field trial below.
const PlayoutDelay kForcedPlayoutDelay = {70, 90};
// One parameterization of the playout-delay tests: the field trial string to
// enable, and the delay expected on the delivered frame.
struct PlayoutDelayOptions {
  std::string field_trial;
  PlayoutDelay expected_delay;
};
// No field trial: the transmitted delay should be passed through unchanged.
const PlayoutDelayOptions kDefaultBehavior = {
    /*field_trial=*/"", /*expected_delay=*/kTransmittedPlayoutDelay};
// Field trial forces 70/90 ms regardless of the transmitted value.
const PlayoutDelayOptions kOverridePlayoutDelay = {
    /*field_trial=*/"WebRTC-ForcePlayoutDelay/min_ms:70,max_ms:90/",
    /*expected_delay=*/kForcedPlayoutDelay};
1169
// Parameterized fixture that constructs the receiver with the field trial
// from the current PlayoutDelayOptions parameter enabled.
class RtpVideoStreamReceiverTestPlayoutDelay
    : public RtpVideoStreamReceiverTest,
      public ::testing::WithParamInterface<PlayoutDelayOptions> {
 protected:
  RtpVideoStreamReceiverTestPlayoutDelay()
      : RtpVideoStreamReceiverTest(GetParam().field_trial) {}
};

// Run the suite once without and once with the forcing field trial.
INSTANTIATE_TEST_SUITE_P(PlayoutDelay,
                         RtpVideoStreamReceiverTestPlayoutDelay,
                         Values(kDefaultBehavior, kOverridePlayoutDelay));
1181
// Sends a packet carrying kTransmittedPlayoutDelay in the playout-delay
// header extension and checks the delay attached to the delivered frame:
// the transmitted value by default, or the forced value under the field
// trial (per the test parameter).
TEST_P(RtpVideoStreamReceiverTestPlayoutDelay, PlayoutDelay) {
  rtc::CopyOnWriteBuffer payload_data({1, 2, 3, 4});
  RtpHeaderExtensionMap extension_map;
  extension_map.Register<PlayoutDelayLimits>(1);
  // Build an outgoing packet (which can serialize the extension), then parse
  // it back below as a received packet.
  RtpPacketToSend packet_to_send(&extension_map);
  packet_to_send.SetPayloadType(kPayloadType);
  packet_to_send.SetSequenceNumber(1);

  // Set playout delay on outgoing packet.
  EXPECT_TRUE(packet_to_send.SetExtension<PlayoutDelayLimits>(
      kTransmittedPlayoutDelay));
  uint8_t* payload = packet_to_send.AllocatePayload(payload_data.size());
  memcpy(payload, payload_data.data(), payload_data.size());

  RtpPacketReceived received_packet(&extension_map);
  received_packet.Parse(packet_to_send.data(), packet_to_send.size());

  RTPVideoHeader video_header =
      GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
  mock_on_complete_frame_callback_.AppendExpectedBitstream(payload_data.data(),
                                                           payload_data.size());
  // Expect the playout delay of encoded frame to be the same as the transmitted
  // playout delay unless it was overridden by a field trial.
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_))
      .WillOnce(Invoke([expected_playout_delay = GetParam().expected_delay](
                           video_coding::EncodedFrame* frame) {
        EXPECT_EQ(frame->EncodedImage().playout_delay_, expected_playout_delay);
      }));
  rtp_video_stream_receiver_->OnReceivedPayloadData(
      received_packet.PayloadBuffer(), received_packet, video_header);
}
1213
1214 } // namespace webrtc
1215