1 /*
2 * Copyright 2017 The WebRTC Project Authors. All rights reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include "video/rtp_video_stream_receiver2.h"
12
13 #include <memory>
14 #include <utility>
15
16 #include "api/video/video_codec_type.h"
17 #include "api/video/video_frame_type.h"
18 #include "common_video/h264/h264_common.h"
19 #include "media/base/media_constants.h"
20 #include "modules/rtp_rtcp/source/rtp_descriptor_authentication.h"
21 #include "modules/rtp_rtcp/source/rtp_format.h"
22 #include "modules/rtp_rtcp/source/rtp_format_vp9.h"
23 #include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h"
24 #include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h"
25 #include "modules/rtp_rtcp/source/rtp_header_extensions.h"
26 #include "modules/rtp_rtcp/source/rtp_packet_received.h"
27 #include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
28 #include "modules/utility/include/process_thread.h"
29 #include "modules/video_coding/frame_object.h"
30 #include "modules/video_coding/include/video_coding_defines.h"
31 #include "modules/video_coding/rtp_frame_reference_finder.h"
32 #include "rtc_base/byte_buffer.h"
33 #include "rtc_base/logging.h"
34 #include "system_wrappers/include/clock.h"
35 #include "system_wrappers/include/field_trial.h"
36 #include "test/field_trial.h"
37 #include "test/gmock.h"
38 #include "test/gtest.h"
39 #include "test/mock_frame_transformer.h"
40 #include "test/time_controller/simulated_task_queue.h"
41
42 using ::testing::_;
43 using ::testing::ElementsAre;
44 using ::testing::Invoke;
45 using ::testing::SizeIs;
46 using ::testing::Values;
47
48 namespace webrtc {
49
50 namespace {
51
// Annex B start code that the receiver prepends to every H.264 NAL unit in
// the reassembled bitstream.
const uint8_t kH264StartCode[] = {0x00, 0x00, 0x00, 0x01};
53
GetAbsoluteCaptureTimestamps(const video_coding::EncodedFrame * frame)54 std::vector<uint64_t> GetAbsoluteCaptureTimestamps(
55 const video_coding::EncodedFrame* frame) {
56 std::vector<uint64_t> result;
57 for (const auto& packet_info : frame->PacketInfos()) {
58 if (packet_info.absolute_capture_time()) {
59 result.push_back(
60 packet_info.absolute_capture_time()->absolute_capture_timestamp);
61 }
62 }
63 return result;
64 }
65
GetGenericVideoHeader(VideoFrameType frame_type)66 RTPVideoHeader GetGenericVideoHeader(VideoFrameType frame_type) {
67 RTPVideoHeader video_header;
68 video_header.is_first_packet_in_frame = true;
69 video_header.is_last_packet_in_frame = true;
70 video_header.codec = kVideoCodecGeneric;
71 video_header.frame_type = frame_type;
72 return video_header;
73 }
74
// Mock of the outgoing Transport; lets tests observe RTP/RTCP sends.
class MockTransport : public Transport {
 public:
  MOCK_METHOD(bool,
              SendRtp,
              (const uint8_t*, size_t length, const PacketOptions& options),
              (override));
  MOCK_METHOD(bool, SendRtcp, (const uint8_t*, size_t length), (override));
};
83
// Mock NACK sink used to verify which sequence numbers would be NACKed.
class MockNackSender : public NackSender {
 public:
  MOCK_METHOD(void,
              SendNack,
              (const std::vector<uint16_t>& sequence_numbers,
               bool buffering_allowed),
              (override));
};
92
// Mock keyframe-request sink; tests expect RequestKeyFrame() when the
// receiver needs to recover (first frame is delta, buffer overflow, ...).
class MockKeyFrameRequestSender : public KeyFrameRequestSender {
 public:
  MOCK_METHOD(void, RequestKeyFrame, (), (override));
};
97
98 class MockOnCompleteFrameCallback
99 : public video_coding::OnCompleteFrameCallback {
100 public:
101 MOCK_METHOD(void, DoOnCompleteFrame, (video_coding::EncodedFrame*), ());
102 MOCK_METHOD(void,
103 DoOnCompleteFrameFailNullptr,
104 (video_coding::EncodedFrame*),
105 ());
106 MOCK_METHOD(void,
107 DoOnCompleteFrameFailLength,
108 (video_coding::EncodedFrame*),
109 ());
110 MOCK_METHOD(void,
111 DoOnCompleteFrameFailBitstream,
112 (video_coding::EncodedFrame*),
113 ());
OnCompleteFrame(std::unique_ptr<video_coding::EncodedFrame> frame)114 void OnCompleteFrame(
115 std::unique_ptr<video_coding::EncodedFrame> frame) override {
116 if (!frame) {
117 DoOnCompleteFrameFailNullptr(nullptr);
118 return;
119 }
120 EXPECT_EQ(buffer_.Length(), frame->size());
121 if (buffer_.Length() != frame->size()) {
122 DoOnCompleteFrameFailLength(frame.get());
123 return;
124 }
125 if (frame->size() != buffer_.Length() ||
126 memcmp(buffer_.Data(), frame->data(), buffer_.Length()) != 0) {
127 DoOnCompleteFrameFailBitstream(frame.get());
128 return;
129 }
130 DoOnCompleteFrame(frame.get());
131 }
132
ClearExpectedBitstream()133 void ClearExpectedBitstream() { buffer_.Clear(); }
134
AppendExpectedBitstream(const uint8_t data[],size_t size_in_bytes)135 void AppendExpectedBitstream(const uint8_t data[], size_t size_in_bytes) {
136 // TODO(Johan): Let rtc::ByteBuffer handle uint8_t* instead of char*.
137 buffer_.WriteBytes(reinterpret_cast<const char*>(data), size_in_bytes);
138 }
139 rtc::ByteBufferWriter buffer_;
140 };
141
// Mock secondary packet sink used by the secondary-sink notification tests.
class MockRtpPacketSink : public RtpPacketSinkInterface {
 public:
  MOCK_METHOD(void, OnRtpPacket, (const RtpPacketReceived&), (override));
};
146
// Shared identifiers used by the packet-generating helpers and tests below.
constexpr uint32_t kSsrc = 111;
constexpr uint16_t kSequenceNumber = 222;
constexpr int kPayloadType = 100;
constexpr int kRedPayloadType = 125;
151
CreateRtpPacketReceived()152 std::unique_ptr<RtpPacketReceived> CreateRtpPacketReceived() {
153 auto packet = std::make_unique<RtpPacketReceived>();
154 packet->SetSsrc(kSsrc);
155 packet->SetSequenceNumber(kSequenceNumber);
156 packet->SetPayloadType(kPayloadType);
157 return packet;
158 }
159
// Matches packets by SSRC and sequence number only; other header fields and
// the payload are intentionally ignored.
MATCHER_P(SamePacketAs, other, "") {
  return arg.Ssrc() == other.Ssrc() &&
         arg.SequenceNumber() == other.SequenceNumber();
}
164
165 } // namespace
166
// Test fixture that wires an RtpVideoStreamReceiver2 to mock transport, NACK,
// keyframe-request and complete-frame sinks, and registers a generic codec on
// kPayloadType so tests can inject payloads without further setup.
class RtpVideoStreamReceiver2Test : public ::testing::Test {
 public:
  RtpVideoStreamReceiver2Test() : RtpVideoStreamReceiver2Test("") {}
  // |field_trials| lets parameterized subclasses enable experiments for the
  // lifetime of the fixture (see RtpVideoStreamReceiver2TestH264 below).
  explicit RtpVideoStreamReceiver2Test(std::string field_trials)
      : override_field_trials_(field_trials),
        config_(CreateConfig()),
        process_thread_(ProcessThread::Create("TestThread")) {
    rtp_receive_statistics_ =
        ReceiveStatistics::Create(Clock::GetRealTimeClock());
    rtp_video_stream_receiver_ = std::make_unique<RtpVideoStreamReceiver2>(
        TaskQueueBase::Current(), Clock::GetRealTimeClock(), &mock_transport_,
        nullptr, nullptr, &config_, rtp_receive_statistics_.get(), nullptr,
        nullptr, process_thread_.get(), &mock_nack_sender_,
        &mock_key_frame_request_sender_, &mock_on_complete_frame_callback_,
        nullptr, nullptr);
    VideoCodec codec;
    codec.plType = kPayloadType;
    codec.codecType = kVideoCodecGeneric;
    rtp_video_stream_receiver_->AddReceiveCodec(codec, {},
                                                /*raw_payload=*/false);
  }

  // Returns an H.264 header with an empty RTPVideoHeaderH264 in place, ready
  // for AddSps/AddPps/AddIdr below.
  RTPVideoHeader GetDefaultH264VideoHeader() {
    RTPVideoHeader video_header;
    video_header.codec = kVideoCodecH264;
    video_header.video_type_header.emplace<RTPVideoHeaderH264>();
    return video_header;
  }

  // TODO(Johan): refactor h264_sps_pps_tracker_unittests.cc to avoid duplicate
  // code.
  // Appends a minimal two-byte SPS NAL unit (type byte + sps id) to |data|
  // and records it in |video_header|'s H.264 NALU list.
  void AddSps(RTPVideoHeader* video_header,
              uint8_t sps_id,
              rtc::CopyOnWriteBuffer* data) {
    NaluInfo info;
    info.type = H264::NaluType::kSps;
    info.sps_id = sps_id;
    info.pps_id = -1;
    data->AppendData({H264::NaluType::kSps, sps_id});
    auto& h264 = absl::get<RTPVideoHeaderH264>(video_header->video_type_header);
    h264.nalus[h264.nalus_length++] = info;
  }

  // Appends a minimal PPS NAL unit referencing |sps_id| to |data| and records
  // it in |video_header|'s H.264 NALU list.
  void AddPps(RTPVideoHeader* video_header,
              uint8_t sps_id,
              uint8_t pps_id,
              rtc::CopyOnWriteBuffer* data) {
    NaluInfo info;
    info.type = H264::NaluType::kPps;
    info.sps_id = sps_id;
    info.pps_id = pps_id;
    data->AppendData({H264::NaluType::kPps, pps_id});
    auto& h264 = absl::get<RTPVideoHeaderH264>(video_header->video_type_header);
    h264.nalus[h264.nalus_length++] = info;
  }

  // Records an IDR NAL unit referencing |pps_id| in |video_header|. Note:
  // this does not append payload bytes; callers add the IDR bytes themselves.
  void AddIdr(RTPVideoHeader* video_header, int pps_id) {
    NaluInfo info;
    info.type = H264::NaluType::kIdr;
    info.sps_id = -1;
    info.pps_id = pps_id;
    auto& h264 = absl::get<RTPVideoHeaderH264>(video_header->video_type_header);
    h264.nalus[h264.nalus_length++] = info;
  }

 protected:
  static VideoReceiveStream::Config CreateConfig() {
    VideoReceiveStream::Config config(nullptr);
    config.rtp.remote_ssrc = 1111;
    config.rtp.local_ssrc = 2222;
    config.rtp.red_payload_type = kRedPayloadType;
    return config;
  }

  // Makes the current thread act as the receiver's worker task queue.
  TokenTaskQueue task_queue_;
  TokenTaskQueue::CurrentTaskQueueSetter task_queue_setter_{&task_queue_};

  const webrtc::test::ScopedFieldTrials override_field_trials_;
  VideoReceiveStream::Config config_;
  MockNackSender mock_nack_sender_;
  MockKeyFrameRequestSender mock_key_frame_request_sender_;
  MockTransport mock_transport_;
  MockOnCompleteFrameCallback mock_on_complete_frame_callback_;
  std::unique_ptr<ProcessThread> process_thread_;
  std::unique_ptr<ReceiveStatistics> rtp_receive_statistics_;
  std::unique_ptr<RtpVideoStreamReceiver2> rtp_video_stream_receiver_;
};
254
TEST_F(RtpVideoStreamReceiver2Test, CacheColorSpaceFromLastPacketOfKeyframe) {
  // Test that color space is cached from the last packet of a key frame and
  // that it's not reset by padding packets without color space.
  constexpr int kVp9PayloadType = 99;
  const ColorSpace kColorSpace(
      ColorSpace::PrimaryID::kFILM, ColorSpace::TransferID::kBT2020_12,
      ColorSpace::MatrixID::kBT2020_NCL, ColorSpace::RangeID::kFull);
  const std::vector<uint8_t> kKeyFramePayload = {0, 1, 2, 3, 4, 5,
                                                 6, 7, 8, 9, 10};
  const std::vector<uint8_t> kDeltaFramePayload = {0, 1, 2, 3, 4};

  // Anonymous helper class that generates received packets.
  class {
   public:
    void SetPayload(const std::vector<uint8_t>& payload,
                    VideoFrameType video_frame_type) {
      video_frame_type_ = video_frame_type;
      RtpPacketizer::PayloadSizeLimits pay_load_size_limits;
      // Reduce max payload length to make sure the key frame generates two
      // packets.
      pay_load_size_limits.max_payload_len = 8;
      RTPVideoHeaderVP9 rtp_video_header_vp9;
      rtp_video_header_vp9.InitRTPVideoHeaderVP9();
      rtp_video_header_vp9.inter_pic_predicted =
          (video_frame_type == VideoFrameType::kVideoFrameDelta);
      rtp_packetizer_ = std::make_unique<RtpPacketizerVp9>(
          payload, pay_load_size_limits, rtp_video_header_vp9);
    }

    size_t NumPackets() { return rtp_packetizer_->NumPackets(); }
    void SetColorSpace(const ColorSpace& color_space) {
      color_space_ = color_space;
    }

    RtpPacketReceived NextPacket() {
      RtpHeaderExtensionMap extension_map;
      extension_map.Register<ColorSpaceExtension>(1);
      RtpPacketToSend packet_to_send(&extension_map);
      packet_to_send.SetSequenceNumber(sequence_number_++);
      packet_to_send.SetSsrc(kSsrc);
      packet_to_send.SetPayloadType(kVp9PayloadType);
      // Only the last remaining packet of a key frame carries the color
      // space extension.
      bool include_color_space =
          (rtp_packetizer_->NumPackets() == 1u &&
           video_frame_type_ == VideoFrameType::kVideoFrameKey);
      if (include_color_space) {
        EXPECT_TRUE(
            packet_to_send.SetExtension<ColorSpaceExtension>(color_space_));
      }
      rtp_packetizer_->NextPacket(&packet_to_send);

      // Round-trip through the wire format to obtain a received packet.
      RtpPacketReceived received_packet(&extension_map);
      received_packet.Parse(packet_to_send.data(), packet_to_send.size());
      return received_packet;
    }

   private:
    uint16_t sequence_number_ = 0;
    VideoFrameType video_frame_type_;
    ColorSpace color_space_;
    std::unique_ptr<RtpPacketizer> rtp_packetizer_;
  } received_packet_generator;
  received_packet_generator.SetColorSpace(kColorSpace);

  // Prepare the receiver for VP9.
  VideoCodec codec;
  codec.plType = kVp9PayloadType;
  codec.codecType = kVideoCodecVP9;
  std::map<std::string, std::string> codec_params;
  rtp_video_stream_receiver_->AddReceiveCodec(codec, codec_params,
                                              /*raw_payload=*/false);

  // Generate key frame packets.
  received_packet_generator.SetPayload(kKeyFramePayload,
                                       VideoFrameType::kVideoFrameKey);
  EXPECT_EQ(received_packet_generator.NumPackets(), 2u);
  RtpPacketReceived key_frame_packet1 = received_packet_generator.NextPacket();
  RtpPacketReceived key_frame_packet2 = received_packet_generator.NextPacket();

  // Generate delta frame packet.
  received_packet_generator.SetPayload(kDeltaFramePayload,
                                       VideoFrameType::kVideoFrameDelta);
  EXPECT_EQ(received_packet_generator.NumPackets(), 1u);
  RtpPacketReceived delta_frame_packet = received_packet_generator.NextPacket();

  rtp_video_stream_receiver_->StartReceive();
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kKeyFramePayload.data(), kKeyFramePayload.size());

  // Send the key frame and expect a callback with color space information.
  EXPECT_FALSE(key_frame_packet1.GetExtension<ColorSpaceExtension>());
  EXPECT_TRUE(key_frame_packet2.GetExtension<ColorSpaceExtension>());
  rtp_video_stream_receiver_->OnRtpPacket(key_frame_packet1);
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_))
      .WillOnce(Invoke([kColorSpace](video_coding::EncodedFrame* frame) {
        ASSERT_TRUE(frame->EncodedImage().ColorSpace());
        EXPECT_EQ(*frame->EncodedImage().ColorSpace(), kColorSpace);
      }));
  rtp_video_stream_receiver_->OnRtpPacket(key_frame_packet2);
  // Resend the first key frame packet to simulate padding for example.
  rtp_video_stream_receiver_->OnRtpPacket(key_frame_packet1);

  mock_on_complete_frame_callback_.ClearExpectedBitstream();
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kDeltaFramePayload.data(), kDeltaFramePayload.size());

  // Expect delta frame to have color space set even though color space not
  // included in the RTP packet.
  EXPECT_FALSE(delta_frame_packet.GetExtension<ColorSpaceExtension>());
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_))
      .WillOnce(Invoke([kColorSpace](video_coding::EncodedFrame* frame) {
        ASSERT_TRUE(frame->EncodedImage().ColorSpace());
        EXPECT_EQ(*frame->EncodedImage().ColorSpace(), kColorSpace);
      }));
  rtp_video_stream_receiver_->OnRtpPacket(delta_frame_packet);
}
370
// A single-packet generic key frame is delivered as one complete frame whose
// bitstream matches the injected payload.
TEST_F(RtpVideoStreamReceiver2Test, GenericKeyFrame) {
  RtpPacketReceived rtp_packet;
  rtp_packet.SetPayloadType(kPayloadType);
  rtp_packet.SetSequenceNumber(1);
  rtc::CopyOnWriteBuffer payload({1, 2, 3, 4});
  RTPVideoHeader video_header =
      GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
  mock_on_complete_frame_callback_.AppendExpectedBitstream(payload.data(),
                                                           payload.size());
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
  rtp_video_stream_receiver_->OnReceivedPayloadData(payload, rtp_packet,
                                                    video_header);
}
384
// Verifies that the AbsoluteCaptureTime header extension on a received packet
// is surfaced through the assembled frame's PacketInfos().
TEST_F(RtpVideoStreamReceiver2Test, PacketInfoIsPropagatedIntoVideoFrames) {
  constexpr uint64_t kAbsoluteCaptureTimestamp = 12;
  constexpr int kId0 = 1;

  RtpHeaderExtensionMap extension_map;
  extension_map.Register<AbsoluteCaptureTimeExtension>(kId0);
  RtpPacketReceived rtp_packet(&extension_map);
  rtp_packet.SetPayloadType(kPayloadType);
  rtc::CopyOnWriteBuffer data({1, 2, 3, 4});
  rtp_packet.SetSequenceNumber(1);
  rtp_packet.SetTimestamp(1);
  rtp_packet.SetSsrc(kSsrc);
  rtp_packet.SetExtension<AbsoluteCaptureTimeExtension>(
      AbsoluteCaptureTime{kAbsoluteCaptureTimestamp,
                          /*estimated_capture_clock_offset=*/absl::nullopt});

  RTPVideoHeader video_header =
      GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
                                                           data.size());
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_))
      .WillOnce(Invoke(
          [kAbsoluteCaptureTimestamp](video_coding::EncodedFrame* frame) {
            EXPECT_THAT(GetAbsoluteCaptureTimestamps(frame),
                        ElementsAre(kAbsoluteCaptureTimestamp));
          }));
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);
}
414
// Sends two frames where only the first carries an AbsoluteCaptureTime
// extension; the receiver is expected to extrapolate a capture time for the
// second frame from the first packet's value.
TEST_F(RtpVideoStreamReceiver2Test,
       MissingAbsoluteCaptureTimeIsFilledWithExtrapolatedValue) {
  constexpr uint64_t kAbsoluteCaptureTimestamp = 12;
  constexpr int kId0 = 1;

  RtpHeaderExtensionMap extension_map;
  extension_map.Register<AbsoluteCaptureTimeExtension>(kId0);
  RtpPacketReceived rtp_packet(&extension_map);
  rtp_packet.SetPayloadType(kPayloadType);

  rtc::CopyOnWriteBuffer data({1, 2, 3, 4});
  uint16_t sequence_number = 1;
  uint32_t rtp_timestamp = 1;
  rtp_packet.SetSequenceNumber(sequence_number);
  rtp_packet.SetTimestamp(rtp_timestamp);
  rtp_packet.SetSsrc(kSsrc);
  rtp_packet.SetExtension<AbsoluteCaptureTimeExtension>(
      AbsoluteCaptureTime{kAbsoluteCaptureTimestamp,
                          /*estimated_capture_clock_offset=*/absl::nullopt});

  RTPVideoHeader video_header =
      GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
                                                           data.size());
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);

  // Rtp packet without absolute capture time.
  rtp_packet = RtpPacketReceived(&extension_map);
  rtp_packet.SetPayloadType(kPayloadType);
  rtp_packet.SetSequenceNumber(++sequence_number);
  rtp_packet.SetTimestamp(++rtp_timestamp);
  rtp_packet.SetSsrc(kSsrc);

  // There is no absolute capture time in the second packet.
  // Expect rtp video stream receiver to extrapolate it for the resulting video
  // frame using absolute capture time from the previous packet.
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_))
      .WillOnce(Invoke([](video_coding::EncodedFrame* frame) {
        EXPECT_THAT(GetAbsoluteCaptureTimestamps(frame), SizeIs(1));
      }));
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);
}
460
// A RED packet whose encapsulated payload is itself marked as RED must not
// make the receiver recurse into itself. The test passes if processing the
// packet terminates (no stack overflow).
TEST_F(RtpVideoStreamReceiver2Test,
       NoInfiniteRecursionOnEncapsulatedRedPacket) {
  const std::vector<uint8_t> data({
      0x80,              // RTP version.
      kRedPayloadType,   // Payload type.
      0, 0, 0, 0, 0, 0,  // Don't care.
      0, 0, 0x4, 0x57,   // SSRC
      kRedPayloadType,   // RED header.
      0, 0, 0, 0, 0      // Don't care.
  });
  RtpPacketReceived packet;
  EXPECT_TRUE(packet.Parse(data.data(), data.size()));
  rtp_video_stream_receiver_->StartReceive();
  rtp_video_stream_receiver_->OnRtpPacket(packet);
}
476
// A RED packet with an empty payload must be dropped without reading past
// the end of the buffer (checked by ASan).
TEST_F(RtpVideoStreamReceiver2Test,
       DropsPacketWithRedPayloadTypeAndEmptyPayload) {
  const uint8_t kRedPayloadType = 125;
  config_.rtp.red_payload_type = kRedPayloadType;
  // NOTE(review): this fixture does not override SetUp(), so the call below
  // is a no-op and the receiver is NOT re-created as the comment claims. The
  // test still works because CreateConfig() already sets red_payload_type to
  // 125 — confirm whether re-creation was intended.
  SetUp();  // re-create rtp_video_stream_receiver with red payload type.
  // clang-format off
  const uint8_t data[] = {
      0x80,              // RTP version.
      kRedPayloadType,   // Payload type.
      0, 0, 0, 0, 0, 0,  // Don't care.
      0, 0, 0x4, 0x57,   // SSRC
      // Empty rtp payload.
  };
  // clang-format on
  RtpPacketReceived packet;
  // Manually convert to CopyOnWriteBuffer to be sure capacity == size
  // and asan bot can catch read buffer overflow.
  EXPECT_TRUE(packet.Parse(rtc::CopyOnWriteBuffer(data)));
  rtp_video_stream_receiver_->StartReceive();
  rtp_video_stream_receiver_->OnRtpPacket(packet);
  // Expect asan doesn't find anything.
}
499
// Delivers a generic key frame whose payload differs from the expected
// bitstream and verifies that the bitstream comparison fails.
TEST_F(RtpVideoStreamReceiver2Test, GenericKeyFrameBitstreamError) {
  RtpPacketReceived rtp_packet;
  rtp_packet.SetPayloadType(kPayloadType);
  rtp_packet.SetSequenceNumber(1);
  rtc::CopyOnWriteBuffer payload({1, 2, 3, 4});
  RTPVideoHeader video_header =
      GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
  // Last byte deliberately differs from the payload sent above.
  constexpr uint8_t kExpectedBitstream[] = {1, 2, 3, 0xff};
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kExpectedBitstream, sizeof(kExpectedBitstream));
  EXPECT_CALL(mock_on_complete_frame_callback_,
              DoOnCompleteFrameFailBitstream(_));
  rtp_video_stream_receiver_->OnReceivedPayloadData(payload, rtp_packet,
                                                    video_header);
}
515
// H.264-specific fixture; the test parameter is a field-trial string applied
// through the base fixture's ScopedFieldTrials.
class RtpVideoStreamReceiver2TestH264
    : public RtpVideoStreamReceiver2Test,
      public ::testing::WithParamInterface<std::string> {
 protected:
  RtpVideoStreamReceiver2TestH264() : RtpVideoStreamReceiver2Test(GetParam()) {}
};
522
// Runs every H.264 test both with and without the
// WebRTC-SpsPpsIdrIsH264Keyframe field trial enabled.
INSTANTIATE_TEST_SUITE_P(SpsPpsIdrIsKeyframe,
                         RtpVideoStreamReceiver2TestH264,
                         Values("", "WebRTC-SpsPpsIdrIsH264Keyframe/Enabled/"));
526
527 // Fails on MSAN: https://bugs.chromium.org/p/webrtc/issues/detail?id=11376.
528 #if defined(MEMORY_SANITIZER)
529 #define MAYBE_InBandSpsPps DISABLED_InBandSpsPps
530 #else
531 #define MAYBE_InBandSpsPps InBandSpsPps
532 #endif
// In-band SPS and PPS followed by an IDR must be assembled into a single key
// frame, with each NAL unit prefixed by an Annex B start code.
TEST_P(RtpVideoStreamReceiver2TestH264, MAYBE_InBandSpsPps) {
  // SPS in its own (empty-frame) packet, sequence number 0.
  rtc::CopyOnWriteBuffer sps_data;
  RtpPacketReceived rtp_packet;
  RTPVideoHeader sps_video_header = GetDefaultH264VideoHeader();
  AddSps(&sps_video_header, 0, &sps_data);
  rtp_packet.SetSequenceNumber(0);
  rtp_packet.SetPayloadType(kPayloadType);
  sps_video_header.is_first_packet_in_frame = true;
  sps_video_header.frame_type = VideoFrameType::kEmptyFrame;
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(sps_data.data(),
                                                           sps_data.size());
  rtp_video_stream_receiver_->OnReceivedPayloadData(sps_data, rtp_packet,
                                                    sps_video_header);

  // PPS (referencing sps 0) in its own packet, sequence number 1.
  rtc::CopyOnWriteBuffer pps_data;
  RTPVideoHeader pps_video_header = GetDefaultH264VideoHeader();
  AddPps(&pps_video_header, 0, 1, &pps_data);
  rtp_packet.SetSequenceNumber(1);
  pps_video_header.is_first_packet_in_frame = true;
  pps_video_header.frame_type = VideoFrameType::kEmptyFrame;
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(pps_data.data(),
                                                           pps_data.size());
  rtp_video_stream_receiver_->OnReceivedPayloadData(pps_data, rtp_packet,
                                                    pps_video_header);

  // IDR (referencing pps 1) completes the key frame, sequence number 2.
  rtc::CopyOnWriteBuffer idr_data;
  RTPVideoHeader idr_video_header = GetDefaultH264VideoHeader();
  AddIdr(&idr_video_header, 1);
  rtp_packet.SetSequenceNumber(2);
  idr_video_header.is_first_packet_in_frame = true;
  idr_video_header.is_last_packet_in_frame = true;
  idr_video_header.frame_type = VideoFrameType::kVideoFrameKey;
  const uint8_t idr[] = {0x65, 1, 2, 3};
  idr_data.AppendData(idr);
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(idr_data.data(),
                                                           idr_data.size());
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
  rtp_video_stream_receiver_->OnReceivedPayloadData(idr_data, rtp_packet,
                                                    idr_video_header);
}
579
// SPS/PPS delivered out-of-band via the sprop-parameter-sets fmtp attribute
// must be inserted (Annex B framed) in front of the first IDR frame.
TEST_P(RtpVideoStreamReceiver2TestH264, OutOfBandFmtpSpsPps) {
  constexpr int kPayloadType = 99;  // Shadows the file-level kPayloadType.
  VideoCodec codec;
  codec.plType = kPayloadType;
  // NOTE(review): codec.codecType is left default-initialized here —
  // presumably AddReceiveCodec keys the sprop handling on plType and the
  // parameter map alone; confirm.
  std::map<std::string, std::string> codec_params;
  // Example parameter sets from https://tools.ietf.org/html/rfc3984#section-8.2
  // .
  codec_params.insert(
      {cricket::kH264FmtpSpropParameterSets, "Z0IACpZTBYmI,aMljiA=="});
  rtp_video_stream_receiver_->AddReceiveCodec(codec, codec_params,
                                              /*raw_payload=*/false);
  // Decoded form of the base64 sprop SPS above.
  const uint8_t binary_sps[] = {0x67, 0x42, 0x00, 0x0a, 0x96,
                                0x53, 0x05, 0x89, 0x88};
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(binary_sps,
                                                           sizeof(binary_sps));
  // Decoded form of the base64 sprop PPS above.
  const uint8_t binary_pps[] = {0x68, 0xc9, 0x63, 0x88};
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(binary_pps,
                                                           sizeof(binary_pps));

  RtpPacketReceived rtp_packet;
  RTPVideoHeader video_header = GetDefaultH264VideoHeader();
  AddIdr(&video_header, 0);
  rtp_packet.SetPayloadType(kPayloadType);
  rtp_packet.SetSequenceNumber(2);
  video_header.is_first_packet_in_frame = true;
  video_header.is_last_packet_in_frame = true;
  video_header.codec = kVideoCodecH264;
  video_header.frame_type = VideoFrameType::kVideoFrameKey;
  rtc::CopyOnWriteBuffer data({1, 2, 3});
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
                                                           data.size());
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);
}
621
// Exercises padding (empty-payload) packets inside the media stream: padding
// must not produce frames, and late padding must close sequence-number gaps
// so that held-back frames are released.
TEST_F(RtpVideoStreamReceiver2Test, PaddingInMediaStream) {
  RtpPacketReceived rtp_packet;
  RTPVideoHeader video_header = GetDefaultH264VideoHeader();
  rtc::CopyOnWriteBuffer data({1, 2, 3});
  rtp_packet.SetPayloadType(kPayloadType);
  rtp_packet.SetSequenceNumber(2);
  video_header.is_first_packet_in_frame = true;
  video_header.is_last_packet_in_frame = true;
  video_header.codec = kVideoCodecGeneric;
  video_header.frame_type = VideoFrameType::kVideoFrameKey;
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
                                                           data.size());

  // Key frame at sequence number 2 completes immediately.
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);

  // Padding packet (empty payload) at sequence number 3: no frame expected.
  rtp_packet.SetSequenceNumber(3);
  rtp_video_stream_receiver_->OnReceivedPayloadData({}, rtp_packet,
                                                    video_header);

  // Delta frame at sequence number 4 follows contiguously and completes.
  rtp_packet.SetSequenceNumber(4);
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
  video_header.frame_type = VideoFrameType::kVideoFrameDelta;
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);

  // Sequence number 5 is still missing, so this delta frame is held back.
  rtp_packet.SetSequenceNumber(6);
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);

  // Late padding at sequence number 5 closes the gap and releases frame 6.
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
  rtp_packet.SetSequenceNumber(5);
  rtp_video_stream_receiver_->OnReceivedPayloadData({}, rtp_packet,
                                                    video_header);
}
658
// A delta frame arriving before any key frame must trigger a keyframe
// request.
TEST_F(RtpVideoStreamReceiver2Test, RequestKeyframeIfFirstFrameIsDelta) {
  RtpPacketReceived rtp_packet;
  rtp_packet.SetPayloadType(kPayloadType);
  rtp_packet.SetSequenceNumber(1);
  rtc::CopyOnWriteBuffer payload({1, 2, 3, 4});
  RTPVideoHeader video_header =
      GetGenericVideoHeader(VideoFrameType::kVideoFrameDelta);
  EXPECT_CALL(mock_key_frame_request_sender_, RequestKeyFrame());
  rtp_video_stream_receiver_->OnReceivedPayloadData(payload, rtp_packet,
                                                    video_header);
}
670
// Fills the packet buffer with incomplete frames until it overflows; the
// receiver must then request a key frame to recover.
TEST_F(RtpVideoStreamReceiver2Test, RequestKeyframeWhenPacketBufferGetsFull) {
  constexpr int kPacketBufferMaxSize = 2048;

  RtpPacketReceived rtp_packet;
  rtp_packet.SetPayloadType(kPayloadType);
  rtc::CopyOnWriteBuffer data({1, 2, 3, 4});
  RTPVideoHeader video_header =
      GetGenericVideoHeader(VideoFrameType::kVideoFrameDelta);
  // Incomplete frames so that the packet buffer is filling up.
  video_header.is_last_packet_in_frame = false;
  uint16_t start_sequence_number = 1234;
  rtp_packet.SetSequenceNumber(start_sequence_number);
  // Stepping by 2 leaves a gap after every packet so no frame can complete.
  while (rtp_packet.SequenceNumber() - start_sequence_number <
         kPacketBufferMaxSize) {
    rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                      video_header);
    rtp_packet.SetSequenceNumber(rtp_packet.SequenceNumber() + 2);
  }

  EXPECT_CALL(mock_key_frame_request_sender_, RequestKeyFrame());
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);
}
694
// All registered secondary sinks receive every packet delivered to a started
// receiver.
TEST_F(RtpVideoStreamReceiver2Test, SecondarySinksGetRtpNotifications) {
  rtp_video_stream_receiver_->StartReceive();

  MockRtpPacketSink secondary_sink_1;
  MockRtpPacketSink secondary_sink_2;

  rtp_video_stream_receiver_->AddSecondarySink(&secondary_sink_1);
  rtp_video_stream_receiver_->AddSecondarySink(&secondary_sink_2);

  auto rtp_packet = CreateRtpPacketReceived();
  EXPECT_CALL(secondary_sink_1, OnRtpPacket(SamePacketAs(*rtp_packet)));
  EXPECT_CALL(secondary_sink_2, OnRtpPacket(SamePacketAs(*rtp_packet)));

  rtp_video_stream_receiver_->OnRtpPacket(*rtp_packet);

  // Test tear-down.
  rtp_video_stream_receiver_->StopReceive();
  rtp_video_stream_receiver_->RemoveSecondarySink(&secondary_sink_1);
  rtp_video_stream_receiver_->RemoveSecondarySink(&secondary_sink_2);
}
715
// A sink that has been added and then removed again must receive no packet
// notifications.
TEST_F(RtpVideoStreamReceiver2Test,
       RemovedSecondarySinksGetNoRtpNotifications) {
  rtp_video_stream_receiver_->StartReceive();

  MockRtpPacketSink sink;
  rtp_video_stream_receiver_->AddSecondarySink(&sink);
  rtp_video_stream_receiver_->RemoveSecondarySink(&sink);

  EXPECT_CALL(sink, OnRtpPacket(_)).Times(0);

  auto packet = CreateRtpPacketReceived();
  rtp_video_stream_receiver_->OnRtpPacket(*packet);

  // Test tear-down.
  rtp_video_stream_receiver_->StopReceive();
}
734
// Removing one sink must not affect delivery to the sinks that remain
// registered.
TEST_F(RtpVideoStreamReceiver2Test,
       OnlyRemovedSecondarySinksExcludedFromNotifications) {
  rtp_video_stream_receiver_->StartReceive();

  MockRtpPacketSink kept_secondary_sink;
  MockRtpPacketSink removed_secondary_sink;

  rtp_video_stream_receiver_->AddSecondarySink(&kept_secondary_sink);
  rtp_video_stream_receiver_->AddSecondarySink(&removed_secondary_sink);
  rtp_video_stream_receiver_->RemoveSecondarySink(&removed_secondary_sink);

  auto rtp_packet = CreateRtpPacketReceived();
  EXPECT_CALL(kept_secondary_sink, OnRtpPacket(SamePacketAs(*rtp_packet)));

  rtp_video_stream_receiver_->OnRtpPacket(*rtp_packet);

  // Test tear-down.
  rtp_video_stream_receiver_->StopReceive();
  rtp_video_stream_receiver_->RemoveSecondarySink(&kept_secondary_sink);
}
755
// Secondary sinks of a stopped (non-started) stream must receive no
// notifications.
TEST_F(RtpVideoStreamReceiver2Test,
       SecondariesOfNonStartedStreamGetNoNotifications) {
  // Explicitly showing that the stream is not in the |started| state,
  // regardless of whether streams start out |started| or |stopped|.
  rtp_video_stream_receiver_->StopReceive();

  MockRtpPacketSink secondary_sink;
  rtp_video_stream_receiver_->AddSecondarySink(&secondary_sink);

  auto rtp_packet = CreateRtpPacketReceived();
  EXPECT_CALL(secondary_sink, OnRtpPacket(_)).Times(0);

  rtp_video_stream_receiver_->OnRtpPacket(*rtp_packet);

  // Test tear-down.
  rtp_video_stream_receiver_->RemoveSecondarySink(&secondary_sink);
}
773
// Checks that a single-packet frame carrying a generic frame descriptor
// (version 00) produces an EncodedFrame whose spatial index, frame
// dependencies and packet infos come from the descriptor.
// Fix: the payload type was previously set twice (before and after
// filling the payload); the redundant first call is removed.
TEST_F(RtpVideoStreamReceiver2Test, ParseGenericDescriptorOnePacket) {
  const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
  const int kSpatialIndex = 1;

  rtp_video_stream_receiver_->StartReceive();

  RtpHeaderExtensionMap extension_map;
  extension_map.Register<RtpGenericFrameDescriptorExtension00>(5);
  RtpPacketReceived rtp_packet(&extension_map);

  // Single-packet frame (both first and last in sub-frame) with two
  // dependency diffs and one spatial layer bit set.
  RtpGenericFrameDescriptor generic_descriptor;
  generic_descriptor.SetFirstPacketInSubFrame(true);
  generic_descriptor.SetLastPacketInSubFrame(true);
  generic_descriptor.SetFrameId(100);
  generic_descriptor.SetSpatialLayersBitmask(1 << kSpatialIndex);
  generic_descriptor.AddFrameDependencyDiff(90);
  generic_descriptor.AddFrameDependencyDiff(80);
  ASSERT_TRUE(rtp_packet.SetExtension<RtpGenericFrameDescriptorExtension00>(
      generic_descriptor));

  uint8_t* payload = rtp_packet.SetPayloadSize(data.size());
  memcpy(payload, data.data(), data.size());
  // The first byte is the header, so we ignore the first byte of |data|.
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data() + 1,
                                                           data.size() - 1);

  rtp_packet.SetMarker(true);
  rtp_packet.SetPayloadType(kPayloadType);
  rtp_packet.SetSequenceNumber(1);

  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce(Invoke([kSpatialIndex](video_coding::EncodedFrame* frame) {
        EXPECT_EQ(frame->num_references, 2U);
        // The dependency diffs are relative to this frame's picture id.
        EXPECT_EQ(frame->references[0], frame->id.picture_id - 90);
        EXPECT_EQ(frame->references[1], frame->id.picture_id - 80);
        EXPECT_EQ(frame->id.spatial_layer, kSpatialIndex);
        EXPECT_THAT(frame->PacketInfos(), SizeIs(1));
      }));

  rtp_video_stream_receiver_->OnRtpPacket(rtp_packet);
}
816
// Checks that a frame split across two packets, each carrying a generic
// frame descriptor (version 00), is reassembled into one EncodedFrame with
// resolution and spatial index taken from the first packet's descriptor and
// packet infos from both packets.
TEST_F(RtpVideoStreamReceiver2Test, ParseGenericDescriptorTwoPackets) {
  const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
  const int kSpatialIndex = 1;

  rtp_video_stream_receiver_->StartReceive();

  RtpHeaderExtensionMap extension_map;
  extension_map.Register<RtpGenericFrameDescriptorExtension00>(5);
  RtpPacketReceived first_packet(&extension_map);

  // First packet of the frame: carries frame id, spatial layer bitmask and
  // the resolution, but is not the last packet in the sub-frame.
  RtpGenericFrameDescriptor first_packet_descriptor;
  first_packet_descriptor.SetFirstPacketInSubFrame(true);
  first_packet_descriptor.SetLastPacketInSubFrame(false);
  first_packet_descriptor.SetFrameId(100);
  first_packet_descriptor.SetSpatialLayersBitmask(1 << kSpatialIndex);
  first_packet_descriptor.SetResolution(480, 360);
  ASSERT_TRUE(first_packet.SetExtension<RtpGenericFrameDescriptorExtension00>(
      first_packet_descriptor));

  uint8_t* first_packet_payload = first_packet.SetPayloadSize(data.size());
  memcpy(first_packet_payload, data.data(), data.size());
  // The first byte is the header, so we ignore the first byte of |data|.
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data() + 1,
                                                           data.size() - 1);

  first_packet.SetPayloadType(kPayloadType);
  first_packet.SetSequenceNumber(1);
  rtp_video_stream_receiver_->OnRtpPacket(first_packet);

  // Second (and last) packet of the frame: only the sub-frame flags are set.
  RtpPacketReceived second_packet(&extension_map);
  RtpGenericFrameDescriptor second_packet_descriptor;
  second_packet_descriptor.SetFirstPacketInSubFrame(false);
  second_packet_descriptor.SetLastPacketInSubFrame(true);
  ASSERT_TRUE(second_packet.SetExtension<RtpGenericFrameDescriptorExtension00>(
      second_packet_descriptor));

  second_packet.SetMarker(true);
  second_packet.SetPayloadType(kPayloadType);
  second_packet.SetSequenceNumber(2);

  uint8_t* second_packet_payload = second_packet.SetPayloadSize(data.size());
  memcpy(second_packet_payload, data.data(), data.size());
  // The first byte is the header, so we ignore the first byte of |data|.
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data() + 1,
                                                           data.size() - 1);

  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce(Invoke([kSpatialIndex](video_coding::EncodedFrame* frame) {
        EXPECT_EQ(frame->num_references, 0U);
        EXPECT_EQ(frame->id.spatial_layer, kSpatialIndex);
        EXPECT_EQ(frame->EncodedImage()._encodedWidth, 480u);
        EXPECT_EQ(frame->EncodedImage()._encodedHeight, 360u);
        EXPECT_THAT(frame->PacketInfos(), SizeIs(2));
      }));

  rtp_video_stream_receiver_->OnRtpPacket(second_packet);
}
874
// When a codec is registered with raw_payload=true, the entire payload is
// delivered as bitstream — no codec-specific depacketization is applied.
TEST_F(RtpVideoStreamReceiver2Test, ParseGenericDescriptorRawPayload) {
  const int kRawPayloadType = 123;
  const std::vector<uint8_t> data = {0, 1, 2, 3, 4};

  VideoCodec codec;
  codec.plType = kRawPayloadType;
  rtp_video_stream_receiver_->AddReceiveCodec(codec, {}, /*raw_payload=*/true);
  rtp_video_stream_receiver_->StartReceive();

  RtpHeaderExtensionMap extension_map;
  extension_map.Register<RtpGenericFrameDescriptorExtension00>(5);
  RtpPacketReceived packet(&extension_map);

  // A single-packet frame: first and last in its sub-frame.
  RtpGenericFrameDescriptor descriptor;
  descriptor.SetFirstPacketInSubFrame(true);
  descriptor.SetLastPacketInSubFrame(true);
  ASSERT_TRUE(
      packet.SetExtension<RtpGenericFrameDescriptorExtension00>(descriptor));

  uint8_t* payload = packet.SetPayloadSize(data.size());
  memcpy(payload, data.data(), data.size());
  // Raw payload: the full payload, including the first byte, is bitstream.
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
                                                           data.size());

  packet.SetMarker(true);
  packet.SetPayloadType(kRawPayloadType);
  packet.SetSequenceNumber(1);

  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame);
  rtp_video_stream_receiver_->OnRtpPacket(packet);
}
906
// Checks that the 16-bit frame id from the generic frame descriptor is
// unwrapped into a monotonically increasing picture id across wrap-around
// (0xffff -> 0x0002 must yield a difference of 3, not a huge jump).
TEST_F(RtpVideoStreamReceiver2Test, UnwrapsFrameId) {
  const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
  // NOTE(review): this local shadows the file-scope kPayloadType constant —
  // presumably intentional since the codec below is registered under it;
  // verify against the fixture.
  const int kPayloadType = 123;

  VideoCodec codec;
  codec.plType = kPayloadType;
  rtp_video_stream_receiver_->AddReceiveCodec(codec, {}, /*raw_payload=*/true);
  rtp_video_stream_receiver_->StartReceive();
  RtpHeaderExtensionMap extension_map;
  extension_map.Register<RtpGenericFrameDescriptorExtension00>(5);

  uint16_t rtp_sequence_number = 1;
  // Builds and injects a single-packet frame whose descriptor carries the
  // given (wrapped) 16-bit frame id.
  auto inject_packet = [&](uint16_t wrapped_frame_id) {
    RtpPacketReceived rtp_packet(&extension_map);

    RtpGenericFrameDescriptor generic_descriptor;
    generic_descriptor.SetFirstPacketInSubFrame(true);
    generic_descriptor.SetLastPacketInSubFrame(true);
    generic_descriptor.SetFrameId(wrapped_frame_id);
    ASSERT_TRUE(rtp_packet.SetExtension<RtpGenericFrameDescriptorExtension00>(
        generic_descriptor));

    uint8_t* payload = rtp_packet.SetPayloadSize(data.size());
    ASSERT_TRUE(payload);
    memcpy(payload, data.data(), data.size());
    // Each injected packet is a new expected frame; reset the bitstream.
    mock_on_complete_frame_callback_.ClearExpectedBitstream();
    mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
                                                             data.size());
    rtp_packet.SetMarker(true);
    rtp_packet.SetPayloadType(kPayloadType);
    rtp_packet.SetSequenceNumber(++rtp_sequence_number);
    rtp_video_stream_receiver_->OnRtpPacket(rtp_packet);
  };

  int64_t first_picture_id;
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce([&](video_coding::EncodedFrame* frame) {
        first_picture_id = frame->id.picture_id;
      });
  inject_packet(/*wrapped_frame_id=*/0xffff);

  // 0x0002 is 3 frames after 0xffff modulo 2^16; the unwrapped picture id
  // must reflect that small forward step.
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce([&](video_coding::EncodedFrame* frame) {
        EXPECT_EQ(frame->id.picture_id - first_picture_id, 3);
      });
  inject_packet(/*wrapped_frame_id=*/0x0002);
}
954
// Fixture for tests exercising the DependencyDescriptor RTP header
// extension. Registers a raw-payload codec and starts the receiver so each
// test only needs to build descriptors and inject packets.
class RtpVideoStreamReceiver2DependencyDescriptorTest
    : public RtpVideoStreamReceiver2Test {
 public:
  RtpVideoStreamReceiver2DependencyDescriptorTest() {
    VideoCodec codec;
    codec.plType = payload_type_;
    rtp_video_stream_receiver_->AddReceiveCodec(codec, {},
                                                /*raw_payload=*/true);
    extension_map_.Register<RtpDependencyDescriptorExtension>(7);
    rtp_video_stream_receiver_->StartReceive();
  }

  // Returns some valid structure for the DependencyDescriptors.
  // First template of that structure always fit for a key frame.
  static FrameDependencyStructure CreateStreamStructure() {
    FrameDependencyStructure stream_structure;
    stream_structure.num_decode_targets = 1;
    stream_structure.templates = {
        FrameDependencyTemplate().Dtis("S"),
        FrameDependencyTemplate().Dtis("S").FrameDiffs({1}),
    };
    return stream_structure;
  }

  // Builds a single-packet frame whose DependencyDescriptor extension is
  // written against |stream_structure| and feeds it to the receiver. The
  // expected bitstream is reset to this packet's payload before injection.
  void InjectPacketWith(const FrameDependencyStructure& stream_structure,
                        const DependencyDescriptor& dependency_descriptor) {
    const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
    RtpPacketReceived rtp_packet(&extension_map_);
    ASSERT_TRUE(rtp_packet.SetExtension<RtpDependencyDescriptorExtension>(
        stream_structure, dependency_descriptor));
    uint8_t* payload = rtp_packet.SetPayloadSize(data.size());
    ASSERT_TRUE(payload);
    memcpy(payload, data.data(), data.size());
    mock_on_complete_frame_callback_.ClearExpectedBitstream();
    mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
                                                             data.size());
    rtp_packet.SetMarker(true);
    rtp_packet.SetPayloadType(payload_type_);
    rtp_packet.SetSequenceNumber(++rtp_sequence_number_);
    rtp_video_stream_receiver_->OnRtpPacket(rtp_packet);
  }

 private:
  const int payload_type_ = 123;
  RtpHeaderExtensionMap extension_map_;
  // Arbitrary nonzero start; incremented once per injected packet.
  uint16_t rtp_sequence_number_ = 321;
};
1002
// Checks that the DependencyDescriptor frame number is unwrapped correctly
// even when delta frames arrive reordered across the 16-bit wrap boundary.
// Fix: deltaframe2_descriptor previously never got its frame_dependencies
// set — a copy-paste error assigned deltaframe1_descriptor a second time.
TEST_F(RtpVideoStreamReceiver2DependencyDescriptorTest, UnwrapsFrameId) {
  FrameDependencyStructure stream_structure = CreateStreamStructure();

  DependencyDescriptor keyframe_descriptor;
  keyframe_descriptor.attached_structure =
      std::make_unique<FrameDependencyStructure>(stream_structure);
  keyframe_descriptor.frame_dependencies = stream_structure.templates[0];
  keyframe_descriptor.frame_number = 0xfff0;
  // DependencyDescriptor doesn't support reordering delta frame before
  // keyframe. Thus feed a key frame first, then test reordered delta frames.
  int64_t first_picture_id;
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce([&](video_coding::EncodedFrame* frame) {
        first_picture_id = frame->id.picture_id;
      });
  InjectPacketWith(stream_structure, keyframe_descriptor);

  DependencyDescriptor deltaframe1_descriptor;
  deltaframe1_descriptor.frame_dependencies = stream_structure.templates[1];
  deltaframe1_descriptor.frame_number = 0xfffe;

  DependencyDescriptor deltaframe2_descriptor;
  deltaframe2_descriptor.frame_dependencies = stream_structure.templates[1];
  deltaframe2_descriptor.frame_number = 0x0002;

  // Parser should unwrap frame ids correctly even if packets were reordered by
  // the network.
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce([&](video_coding::EncodedFrame* frame) {
        // 0x0002 - 0xfff0
        EXPECT_EQ(frame->id.picture_id - first_picture_id, 18);
      })
      .WillOnce([&](video_coding::EncodedFrame* frame) {
        // 0xfffe - 0xfff0
        EXPECT_EQ(frame->id.picture_id - first_picture_id, 14);
      });
  InjectPacketWith(stream_structure, deltaframe2_descriptor);
  InjectPacketWith(stream_structure, deltaframe1_descriptor);
}
1042
// After a new key frame replaces the active dependency structure, a late
// delta frame that still references the old structure must be dropped.
TEST_F(RtpVideoStreamReceiver2DependencyDescriptorTest,
       DropsLateDeltaFramePacketWithDependencyDescriptorExtension) {
  FrameDependencyStructure stream_structure1 = CreateStreamStructure();
  FrameDependencyStructure stream_structure2 = CreateStreamStructure();
  // Make sure template ids for these two structures do not collide:
  // adjust structure_id (that is also used as template id offset).
  stream_structure1.structure_id = 13;
  stream_structure2.structure_id =
      stream_structure1.structure_id + stream_structure1.templates.size();

  DependencyDescriptor keyframe1_descriptor;
  keyframe1_descriptor.attached_structure =
      std::make_unique<FrameDependencyStructure>(stream_structure1);
  keyframe1_descriptor.frame_dependencies = stream_structure1.templates[0];
  keyframe1_descriptor.frame_number = 1;
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame);
  InjectPacketWith(stream_structure1, keyframe1_descriptor);

  // Pass in 2nd key frame with different structure.
  DependencyDescriptor keyframe2_descriptor;
  keyframe2_descriptor.attached_structure =
      std::make_unique<FrameDependencyStructure>(stream_structure2);
  keyframe2_descriptor.frame_dependencies = stream_structure2.templates[0];
  keyframe2_descriptor.frame_number = 3;
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame);
  InjectPacketWith(stream_structure2, keyframe2_descriptor);

  // Pass in late delta frame that uses structure of the 1st key frame.
  DependencyDescriptor deltaframe_descriptor;
  deltaframe_descriptor.frame_dependencies = stream_structure1.templates[0];
  deltaframe_descriptor.frame_number = 2;
  // The late frame must not reach the complete-frame callback.
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame).Times(0);
  InjectPacketWith(stream_structure1, deltaframe_descriptor);
}
1077
// A key frame arriving after a newer key frame (with a different structure)
// must be dropped, and must not block later delta frames that use the
// newer structure.
TEST_F(RtpVideoStreamReceiver2DependencyDescriptorTest,
       DropsLateKeyFramePacketWithDependencyDescriptorExtension) {
  FrameDependencyStructure stream_structure1 = CreateStreamStructure();
  FrameDependencyStructure stream_structure2 = CreateStreamStructure();
  // Make sure template ids for these two structures do not collide:
  // adjust structure_id (that is also used as template id offset).
  stream_structure1.structure_id = 13;
  stream_structure2.structure_id =
      stream_structure1.structure_id + stream_structure1.templates.size();

  DependencyDescriptor keyframe1_descriptor;
  keyframe1_descriptor.attached_structure =
      std::make_unique<FrameDependencyStructure>(stream_structure1);
  keyframe1_descriptor.frame_dependencies = stream_structure1.templates[0];
  keyframe1_descriptor.frame_number = 1;

  DependencyDescriptor keyframe2_descriptor;
  keyframe2_descriptor.attached_structure =
      std::make_unique<FrameDependencyStructure>(stream_structure2);
  keyframe2_descriptor.frame_dependencies = stream_structure2.templates[0];
  keyframe2_descriptor.frame_number = 3;

  // Inject the newer key frame first; only it should be delivered — the
  // stale key frame (frame_number 1) injected afterwards is dropped.
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce([&](video_coding::EncodedFrame* frame) {
        EXPECT_EQ(frame->id.picture_id & 0xFFFF, 3);
      });
  InjectPacketWith(stream_structure2, keyframe2_descriptor);
  InjectPacketWith(stream_structure1, keyframe1_descriptor);

  // Pass in delta frame that uses structure of the 2nd key frame. Late key
  // frame shouldn't block it.
  DependencyDescriptor deltaframe_descriptor;
  deltaframe_descriptor.frame_dependencies = stream_structure2.templates[0];
  deltaframe_descriptor.frame_number = 4;
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce([&](video_coding::EncodedFrame* frame) {
        EXPECT_EQ(frame->id.picture_id & 0xFFFF, 4);
      });
  InjectPacketWith(stream_structure2, deltaframe_descriptor);
}
1118
#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
using RtpVideoStreamReceiver2DeathTest = RtpVideoStreamReceiver2Test;
// Adding the same secondary sink twice is a programming error; the guard
// only fires when DCHECKs are compiled in, hence the #if above.
TEST_F(RtpVideoStreamReceiver2DeathTest, RepeatedSecondarySinkDisallowed) {
  MockRtpPacketSink secondary_sink;

  rtp_video_stream_receiver_->AddSecondarySink(&secondary_sink);
  EXPECT_DEATH(rtp_video_stream_receiver_->AddSecondarySink(&secondary_sink),
               "");

  // Test tear-down.
  rtp_video_stream_receiver_->RemoveSecondarySink(&secondary_sink);
}
#endif
1132
// Frame-transformer round trip: the transformer is registered with the
// stream's SSRC at construction, invoked for each received frame, and
// unregistered when the receiver is destroyed.
TEST_F(RtpVideoStreamReceiver2Test, TransformFrame) {
  rtc::scoped_refptr<MockFrameTransformer> transformer =
      new rtc::RefCountedObject<testing::NiceMock<MockFrameTransformer>>();
  EXPECT_CALL(*transformer,
              RegisterTransformedFrameSinkCallback(_, config_.rtp.remote_ssrc));
  auto receiver = std::make_unique<RtpVideoStreamReceiver2>(
      TaskQueueBase::Current(), Clock::GetRealTimeClock(), &mock_transport_,
      nullptr, nullptr, &config_, rtp_receive_statistics_.get(), nullptr,
      nullptr, process_thread_.get(), &mock_nack_sender_, nullptr,
      &mock_on_complete_frame_callback_, nullptr, transformer);

  VideoCodec codec;
  codec.plType = kPayloadType;
  codec.codecType = kVideoCodecGeneric;
  receiver->AddReceiveCodec(codec, {}, /*raw_payload=*/false);

  rtc::CopyOnWriteBuffer payload({1, 2, 3, 4});
  RtpPacketReceived packet;
  packet.SetPayloadType(kPayloadType);
  packet.SetSequenceNumber(1);
  RTPVideoHeader header = GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
  mock_on_complete_frame_callback_.AppendExpectedBitstream(payload.data(),
                                                           payload.size());
  EXPECT_CALL(*transformer, Transform(_));
  receiver->OnReceivedPayloadData(payload, packet, header);

  // Destroying the receiver must unregister the transformer callback.
  EXPECT_CALL(*transformer,
              UnregisterTransformedFrameSinkCallback(config_.rtp.remote_ssrc));
  receiver = nullptr;
}
1163
// Test default behavior and when playout delay is overridden by field trial.
// The delay written into the PlayoutDelayLimits header extension by the
// "sender" side of the test:
const PlayoutDelay kTransmittedPlayoutDelay = {100, 200};
// The delay the WebRTC-ForcePlayoutDelay field trial below forces instead:
const PlayoutDelay kForcedPlayoutDelay = {70, 90};
// Pairs a field-trial string with the playout delay the received frame is
// expected to carry under that trial.
struct PlayoutDelayOptions {
  std::string field_trial;
  PlayoutDelay expected_delay;
};
// No field trial: the transmitted delay is used as-is.
const PlayoutDelayOptions kDefaultBehavior = {
    /*field_trial=*/"", /*expected_delay=*/kTransmittedPlayoutDelay};
// Override trial: the trial's min/max replace the transmitted delay.
const PlayoutDelayOptions kOverridePlayoutDelay = {
    /*field_trial=*/"WebRTC-ForcePlayoutDelay/min_ms:70,max_ms:90/",
    /*expected_delay=*/kForcedPlayoutDelay};

// Parameterized fixture: constructs the receiver under the param's field
// trial so both configurations run the same test body.
class RtpVideoStreamReceiver2TestPlayoutDelay
    : public RtpVideoStreamReceiver2Test,
      public ::testing::WithParamInterface<PlayoutDelayOptions> {
 protected:
  RtpVideoStreamReceiver2TestPlayoutDelay()
      : RtpVideoStreamReceiver2Test(GetParam().field_trial) {}
};

INSTANTIATE_TEST_SUITE_P(PlayoutDelay,
                         RtpVideoStreamReceiver2TestPlayoutDelay,
                         Values(kDefaultBehavior, kOverridePlayoutDelay));
1188
// Round-trips a packet with the PlayoutDelayLimits extension through an
// RtpPacketToSend -> RtpPacketReceived parse, then checks the delay on the
// resulting EncodedFrame (transmitted value by default, forced value when
// the field trial from the test parameter is active).
TEST_P(RtpVideoStreamReceiver2TestPlayoutDelay, PlayoutDelay) {
  rtc::CopyOnWriteBuffer payload_data({1, 2, 3, 4});
  RtpHeaderExtensionMap extension_map;
  extension_map.Register<PlayoutDelayLimits>(1);
  RtpPacketToSend packet_to_send(&extension_map);
  packet_to_send.SetPayloadType(kPayloadType);
  packet_to_send.SetSequenceNumber(1);

  // Set playout delay on outgoing packet.
  EXPECT_TRUE(packet_to_send.SetExtension<PlayoutDelayLimits>(
      kTransmittedPlayoutDelay));
  uint8_t* payload = packet_to_send.AllocatePayload(payload_data.size());
  memcpy(payload, payload_data.data(), payload_data.size());

  // Re-parse the serialized packet as a received packet.
  RtpPacketReceived received_packet(&extension_map);
  received_packet.Parse(packet_to_send.data(), packet_to_send.size());

  RTPVideoHeader video_header =
      GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
  mock_on_complete_frame_callback_.AppendExpectedBitstream(payload_data.data(),
                                                           payload_data.size());
  // Expect the playout delay of encoded frame to be the same as the transmitted
  // playout delay unless it was overridden by a field trial.
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_))
      .WillOnce(Invoke([expected_playout_delay = GetParam().expected_delay](
                           video_coding::EncodedFrame* frame) {
        EXPECT_EQ(frame->EncodedImage().playout_delay_, expected_playout_delay);
      }));
  rtp_video_stream_receiver_->OnReceivedPayloadData(
      received_packet.PayloadBuffer(), received_packet, video_header);
}
1220
1221 } // namespace webrtc
1222