/*
 * Copyright 2017 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "video/rtp_video_stream_receiver2.h"

#include <cstdint>
#include <cstring>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "api/task_queue/task_queue_base.h"
#include "api/video/video_codec_type.h"
#include "api/video/video_frame_type.h"
#include "call/test/mock_rtp_packet_sink_interface.h"
#include "common_video/h264/h264_common.h"
#include "media/base/media_constants.h"
#include "modules/rtp_rtcp/source/rtp_descriptor_authentication.h"
#include "modules/rtp_rtcp/source/rtp_format.h"
#include "modules/rtp_rtcp/source/rtp_format_vp9.h"
#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h"
#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h"
#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
#include "modules/video_coding/frame_object.h"
#include "modules/video_coding/include/video_coding_defines.h"
#include "modules/video_coding/rtp_frame_reference_finder.h"
#include "rtc_base/byte_buffer.h"
#include "rtc_base/logging.h"
#include "system_wrappers/include/clock.h"
#include "test/gmock.h"
#include "test/gtest.h"
#include "test/mock_frame_transformer.h"
#include "test/mock_transport.h"
#include "test/rtcp_packet_parser.h"
#include "test/scoped_key_value_config.h"
#include "test/time_controller/simulated_task_queue.h"
#include "test/time_controller/simulated_time_controller.h"

using ::testing::_;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::Invoke;
using ::testing::SizeIs;
using ::testing::Values;

namespace webrtc {

namespace {

const uint8_t kH264StartCode[] = {0x00, 0x00, 0x00, 0x01};

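// Collects the absolute capture timestamps carried by the packets that make
// up `frame`, in packet order.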
std::vector<uint64_t> GetAbsoluteCaptureTimestamps(const EncodedFrame* frame) {
  std::vector<uint64_t> result;
  for (const auto& packet_info : frame->PacketInfos()) {
    if (packet_info.absolute_capture_time()) {
      result.push_back(
          packet_info.absolute_capture_time()->absolute_capture_timestamp);
    }
  }
  return result;
}

RTPVideoHeader GetGenericVideoHeader(VideoFrameType frame_type) {
  RTPVideoHeader video_header;
  video_header.is_first_packet_in_frame = true;
  video_header.is_last_packet_in_frame = true;
  video_header.codec = kVideoCodecGeneric;
  video_header.frame_type = frame_type;
  return video_header;
}

class MockNackSender : public NackSender {
 public:
  MOCK_METHOD(void,
              SendNack,
              (const std::vector<uint16_t>& sequence_numbers,
               bool buffering_allowed),
              (override));
};

class MockKeyFrameRequestSender : public KeyFrameRequestSender {
 public:
  MOCK_METHOD(void, RequestKeyFrame, (), (override));
};

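// Records every complete frame delivered by the receiver and checks that its
// reassembled bitstream matches the buffer built via AppendExpectedBitstream().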
class MockOnCompleteFrameCallback
    : public RtpVideoStreamReceiver2::OnCompleteFrameCallback {
 public:
  MOCK_METHOD(void, DoOnCompleteFrame, (EncodedFrame*), ());
  MOCK_METHOD(void, DoOnCompleteFrameFailNullptr, (EncodedFrame*), ());
  MOCK_METHOD(void, DoOnCompleteFrameFailLength, (EncodedFrame*), ());
  MOCK_METHOD(void, DoOnCompleteFrameFailBitstream, (EncodedFrame*), ());
  void OnCompleteFrame(std::unique_ptr<EncodedFrame> frame) override {
    if (!frame) {
      DoOnCompleteFrameFailNullptr(nullptr);
      return;
    }
    EXPECT_EQ(buffer_.Length(), frame->size());
    if (buffer_.Length() != frame->size()) {
      DoOnCompleteFrameFailLength(frame.get());
      return;
    }
    if (frame->size() != buffer_.Length() ||
        memcmp(buffer_.Data(), frame->data(), buffer_.Length()) != 0) {
      DoOnCompleteFrameFailBitstream(frame.get());
      return;
    }
    DoOnCompleteFrame(frame.get());
  }

  void ClearExpectedBitstream() { buffer_.Clear(); }

  void AppendExpectedBitstream(const uint8_t data[], size_t size_in_bytes) {
    // TODO(Johan): Let rtc::ByteBuffer handle uint8_t* instead of char*.
    buffer_.WriteBytes(reinterpret_cast<const char*>(data), size_in_bytes);
  }
  rtc::ByteBufferWriter buffer_;
};

constexpr uint32_t kSsrc = 111;
constexpr int kPayloadType = 100;
constexpr int kRedPayloadType = 125;

std::unique_ptr<RtpPacketReceived> CreateRtpPacketReceived() {
  constexpr uint16_t kSequenceNumber = 222;
  auto packet = std::make_unique<RtpPacketReceived>();
  packet->SetSsrc(kSsrc);
  packet->SetSequenceNumber(kSequenceNumber);
  packet->SetPayloadType(kPayloadType);
  return packet;
}

MATCHER_P(SamePacketAs, other, "") {
  return arg.Ssrc() == other.Ssrc() &&
         arg.SequenceNumber() == other.SequenceNumber();
}

}  // namespace

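// Test fixture that runs RtpVideoStreamReceiver2 on a simulated task queue and
// forwards received RTP packets to an optional test packet sink.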
class RtpVideoStreamReceiver2Test : public ::testing::Test,
                                    public RtpPacketSinkInterface {
 public:
  RtpVideoStreamReceiver2Test() : RtpVideoStreamReceiver2Test("") {}
  explicit RtpVideoStreamReceiver2Test(std::string field_trials)
      : time_controller_(Timestamp::Millis(100)),
        task_queue_(time_controller_.GetTaskQueueFactory()->CreateTaskQueue(
            "RtpVideoStreamReceiver2Test",
            TaskQueueFactory::Priority::NORMAL)),
        task_queue_setter_(task_queue_.get()),
        field_trials_(field_trials),
        config_(CreateConfig()) {
    rtp_receive_statistics_ =
        ReceiveStatistics::Create(Clock::GetRealTimeClock());
    rtp_video_stream_receiver_ = std::make_unique<RtpVideoStreamReceiver2>(
        TaskQueueBase::Current(), Clock::GetRealTimeClock(), &mock_transport_,
        nullptr, nullptr, &config_, rtp_receive_statistics_.get(), nullptr,
        nullptr, &nack_periodic_processor_, &mock_on_complete_frame_callback_,
        nullptr, nullptr, field_trials_, nullptr);
    rtp_video_stream_receiver_->AddReceiveCodec(kPayloadType,
                                                kVideoCodecGeneric, {},
                                                /*raw_payload=*/false);
    ON_CALL(mock_transport_, SendRtcp)
        .WillByDefault(
            Invoke(&rtcp_packet_parser_, &test::RtcpPacketParser::Parse));
  }

  RTPVideoHeader GetDefaultH264VideoHeader() {
    RTPVideoHeader video_header;
    video_header.codec = kVideoCodecH264;
    video_header.video_type_header.emplace<RTPVideoHeaderH264>();
    return video_header;
  }

  // TODO(Johan): refactor h264_sps_pps_tracker_unittests.cc to avoid duplicate
  // code.
  void AddSps(RTPVideoHeader* video_header,
              uint8_t sps_id,
              rtc::CopyOnWriteBuffer* data) {
    NaluInfo info;
    info.type = H264::NaluType::kSps;
    info.sps_id = sps_id;
    info.pps_id = -1;
    data->AppendData<uint8_t, 2>({H264::NaluType::kSps, sps_id});
    auto& h264 =
        absl::get<RTPVideoHeaderH264>(video_header->video_type_header);
    h264.nalus[h264.nalus_length++] = info;
  }

  void AddPps(RTPVideoHeader* video_header,
              uint8_t sps_id,
              uint8_t pps_id,
              rtc::CopyOnWriteBuffer* data) {
    NaluInfo info;
    info.type = H264::NaluType::kPps;
    info.sps_id = sps_id;
    info.pps_id = pps_id;
    data->AppendData<uint8_t, 2>({H264::NaluType::kPps, pps_id});
    auto& h264 =
        absl::get<RTPVideoHeaderH264>(video_header->video_type_header);
    h264.nalus[h264.nalus_length++] = info;
  }

  void AddIdr(RTPVideoHeader* video_header, int pps_id) {
    NaluInfo info;
    info.type = H264::NaluType::kIdr;
    info.sps_id = -1;
    info.pps_id = pps_id;
    auto& h264 =
        absl::get<RTPVideoHeaderH264>(video_header->video_type_header);
    h264.nalus[h264.nalus_length++] = info;
  }

  void OnRtpPacket(const RtpPacketReceived& packet) override {
    if (test_packet_sink_)
      test_packet_sink_->OnRtpPacket(packet);
  }

 protected:
  VideoReceiveStreamInterface::Config CreateConfig() {
    VideoReceiveStreamInterface::Config config(nullptr);
    config.rtp.remote_ssrc = 1111;
    config.rtp.local_ssrc = 2222;
    config.rtp.red_payload_type = kRedPayloadType;
    config.rtp.packet_sink_ = this;
    return config;
  }

  GlobalSimulatedTimeController time_controller_;
  std::unique_ptr<TaskQueueBase, TaskQueueDeleter> task_queue_;
  TokenTaskQueue::CurrentTaskQueueSetter task_queue_setter_;

  webrtc::test::ScopedKeyValueConfig field_trials_;
  VideoReceiveStreamInterface::Config config_;
  NackPeriodicProcessor nack_periodic_processor_;
  test::RtcpPacketParser rtcp_packet_parser_;
  MockTransport mock_transport_;
  MockOnCompleteFrameCallback mock_on_complete_frame_callback_;
  std::unique_ptr<ReceiveStatistics> rtp_receive_statistics_;
  std::unique_ptr<RtpVideoStreamReceiver2> rtp_video_stream_receiver_;
  RtpPacketSinkInterface* test_packet_sink_ = nullptr;
};

TEST_F(RtpVideoStreamReceiver2Test, CacheColorSpaceFromLastPacketOfKeyframe) {
  // Test that color space is cached from the last packet of a key frame and
  // that it's not reset by padding packets without color space.
  constexpr int kVp9PayloadType = 99;
  const ColorSpace kColorSpace(
      ColorSpace::PrimaryID::kFILM, ColorSpace::TransferID::kBT2020_12,
      ColorSpace::MatrixID::kBT2020_NCL, ColorSpace::RangeID::kFull);
  const std::vector<uint8_t> kKeyFramePayload = {0, 1, 2, 3, 4, 5,
                                                 6, 7, 8, 9, 10};
  const std::vector<uint8_t> kDeltaFramePayload = {0, 1, 2, 3, 4};

  // Anonymous helper class that generates received packets.
  class {
   public:
    void SetPayload(const std::vector<uint8_t>& payload,
                    VideoFrameType video_frame_type) {
      video_frame_type_ = video_frame_type;
      RtpPacketizer::PayloadSizeLimits pay_load_size_limits;
      // Reduce max payload length to make sure the key frame generates two
      // packets.
      pay_load_size_limits.max_payload_len = 8;
      RTPVideoHeaderVP9 rtp_video_header_vp9;
      rtp_video_header_vp9.InitRTPVideoHeaderVP9();
      rtp_video_header_vp9.inter_pic_predicted =
          (video_frame_type == VideoFrameType::kVideoFrameDelta);
      rtp_packetizer_ = std::make_unique<RtpPacketizerVp9>(
          payload, pay_load_size_limits, rtp_video_header_vp9);
    }

    size_t NumPackets() { return rtp_packetizer_->NumPackets(); }
    void SetColorSpace(const ColorSpace& color_space) {
      color_space_ = color_space;
    }

    RtpPacketReceived NextPacket() {
      RtpHeaderExtensionMap extension_map;
      extension_map.Register<ColorSpaceExtension>(1);
      RtpPacketToSend packet_to_send(&extension_map);
      packet_to_send.SetSequenceNumber(sequence_number_++);
      packet_to_send.SetSsrc(kSsrc);
      packet_to_send.SetPayloadType(kVp9PayloadType);
      bool include_color_space =
          (rtp_packetizer_->NumPackets() == 1u &&
           video_frame_type_ == VideoFrameType::kVideoFrameKey);
      if (include_color_space) {
        EXPECT_TRUE(
            packet_to_send.SetExtension<ColorSpaceExtension>(color_space_));
      }
      rtp_packetizer_->NextPacket(&packet_to_send);

      RtpPacketReceived received_packet(&extension_map);
      received_packet.Parse(packet_to_send.data(), packet_to_send.size());
      return received_packet;
    }

   private:
    uint16_t sequence_number_ = 0;
    VideoFrameType video_frame_type_;
    ColorSpace color_space_;
    std::unique_ptr<RtpPacketizer> rtp_packetizer_;
  } received_packet_generator;
  received_packet_generator.SetColorSpace(kColorSpace);

  // Prepare the receiver for VP9.
  std::map<std::string, std::string> codec_params;
  rtp_video_stream_receiver_->AddReceiveCodec(kVp9PayloadType, kVideoCodecVP9,
                                              codec_params,
                                              /*raw_payload=*/false);

  // Generate key frame packets.
  received_packet_generator.SetPayload(kKeyFramePayload,
                                       VideoFrameType::kVideoFrameKey);
  EXPECT_EQ(received_packet_generator.NumPackets(), 2u);
  RtpPacketReceived key_frame_packet1 = received_packet_generator.NextPacket();
  RtpPacketReceived key_frame_packet2 = received_packet_generator.NextPacket();

  // Generate delta frame packet.
  received_packet_generator.SetPayload(kDeltaFramePayload,
                                       VideoFrameType::kVideoFrameDelta);
  EXPECT_EQ(received_packet_generator.NumPackets(), 1u);
  RtpPacketReceived delta_frame_packet = received_packet_generator.NextPacket();

  rtp_video_stream_receiver_->StartReceive();
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kKeyFramePayload.data(), kKeyFramePayload.size());

  // Send the key frame and expect a callback with color space information.
  EXPECT_FALSE(key_frame_packet1.GetExtension<ColorSpaceExtension>());
  EXPECT_TRUE(key_frame_packet2.GetExtension<ColorSpaceExtension>());
  rtp_video_stream_receiver_->OnRtpPacket(key_frame_packet1);
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_))
      .WillOnce(Invoke([kColorSpace](EncodedFrame* frame) {
        ASSERT_TRUE(frame->EncodedImage().ColorSpace());
        EXPECT_EQ(*frame->EncodedImage().ColorSpace(), kColorSpace);
      }));
  rtp_video_stream_receiver_->OnRtpPacket(key_frame_packet2);
  // Resend the first key frame packet to simulate, e.g., a padding packet
  // without color space.
  rtp_video_stream_receiver_->OnRtpPacket(key_frame_packet1);

  mock_on_complete_frame_callback_.ClearExpectedBitstream();
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kDeltaFramePayload.data(), kDeltaFramePayload.size());

  // Expect the delta frame to have the color space set even though the color
  // space is not included in the RTP packet.
  EXPECT_FALSE(delta_frame_packet.GetExtension<ColorSpaceExtension>());
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_))
      .WillOnce(Invoke([kColorSpace](EncodedFrame* frame) {
        ASSERT_TRUE(frame->EncodedImage().ColorSpace());
        EXPECT_EQ(*frame->EncodedImage().ColorSpace(), kColorSpace);
      }));
  rtp_video_stream_receiver_->OnRtpPacket(delta_frame_packet);
}

TEST_F(RtpVideoStreamReceiver2Test, GenericKeyFrame) {
  RtpPacketReceived rtp_packet;
  rtc::CopyOnWriteBuffer data({'1', '2', '3', '4'});
  rtp_packet.SetPayloadType(kPayloadType);
  rtp_packet.SetSequenceNumber(1);
  RTPVideoHeader video_header =
      GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
                                                           data.size());
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);
}

TEST_F(RtpVideoStreamReceiver2Test, SetProtectionPayloadTypes) {
  EXPECT_NE(rtp_video_stream_receiver_->red_payload_type(), 104);
  EXPECT_NE(rtp_video_stream_receiver_->ulpfec_payload_type(), 107);

  rtp_video_stream_receiver_->SetProtectionPayloadTypes(104, 107);

  EXPECT_EQ(rtp_video_stream_receiver_->red_payload_type(), 104);
  EXPECT_EQ(rtp_video_stream_receiver_->ulpfec_payload_type(), 107);
}

TEST_F(RtpVideoStreamReceiver2Test, PacketInfoIsPropagatedIntoVideoFrames) {
  constexpr uint64_t kAbsoluteCaptureTimestamp = 12;
  constexpr int kId0 = 1;

  RtpHeaderExtensionMap extension_map;
  extension_map.Register<AbsoluteCaptureTimeExtension>(kId0);
  RtpPacketReceived rtp_packet(&extension_map);
  rtp_packet.SetPayloadType(kPayloadType);
  rtc::CopyOnWriteBuffer data({'1', '2', '3', '4'});
  rtp_packet.SetSequenceNumber(1);
  rtp_packet.SetTimestamp(1);
  rtp_packet.SetSsrc(kSsrc);
  rtp_packet.SetExtension<AbsoluteCaptureTimeExtension>(
      AbsoluteCaptureTime{kAbsoluteCaptureTimestamp,
                          /*estimated_capture_clock_offset=*/absl::nullopt});

  RTPVideoHeader video_header =
      GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
                                                           data.size());
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_))
      .WillOnce(Invoke([kAbsoluteCaptureTimestamp](EncodedFrame* frame) {
        EXPECT_THAT(GetAbsoluteCaptureTimestamps(frame),
                    ElementsAre(kAbsoluteCaptureTimestamp));
      }));
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);
}

TEST_F(RtpVideoStreamReceiver2Test,
       MissingAbsoluteCaptureTimeIsFilledWithExtrapolatedValue) {
  constexpr uint64_t kAbsoluteCaptureTimestamp = 12;
  constexpr int kId0 = 1;

  RtpHeaderExtensionMap extension_map;
  extension_map.Register<AbsoluteCaptureTimeExtension>(kId0);
  RtpPacketReceived rtp_packet(&extension_map);
  rtp_packet.SetPayloadType(kPayloadType);

  rtc::CopyOnWriteBuffer data({'1', '2', '3', '4'});
  uint16_t sequence_number = 1;
  uint32_t rtp_timestamp = 1;
  rtp_packet.SetSequenceNumber(sequence_number);
  rtp_packet.SetTimestamp(rtp_timestamp);
  rtp_packet.SetSsrc(kSsrc);
  rtp_packet.SetExtension<AbsoluteCaptureTimeExtension>(
      AbsoluteCaptureTime{kAbsoluteCaptureTimestamp,
                          /*estimated_capture_clock_offset=*/absl::nullopt});

  RTPVideoHeader video_header =
      GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
                                                           data.size());
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);

  // Rtp packet without absolute capture time.
  rtp_packet = RtpPacketReceived(&extension_map);
  rtp_packet.SetPayloadType(kPayloadType);
  rtp_packet.SetSequenceNumber(++sequence_number);
  rtp_packet.SetTimestamp(++rtp_timestamp);
  rtp_packet.SetSsrc(kSsrc);

  // There is no absolute capture time in the second packet.
  // Expect rtp video stream receiver to extrapolate it for the resulting video
  // frame using absolute capture time from the previous packet.
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_))
      .WillOnce(Invoke([](EncodedFrame* frame) {
        EXPECT_THAT(GetAbsoluteCaptureTimestamps(frame), SizeIs(1));
      }));
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);
}

TEST_F(RtpVideoStreamReceiver2Test,
       NoInfiniteRecursionOnEncapsulatedRedPacket) {
  const std::vector<uint8_t> data({
      0x80,              // RTP version.
      kRedPayloadType,   // Payload type.
      0, 0, 0, 0, 0, 0,  // Don't care.
      0, 0, 0x4, 0x57,   // SSRC
      kRedPayloadType,   // RED header.
      0, 0, 0, 0, 0      // Don't care.
  });
  RtpPacketReceived packet;
  EXPECT_TRUE(packet.Parse(data.data(), data.size()));
  rtp_video_stream_receiver_->StartReceive();
  rtp_video_stream_receiver_->OnRtpPacket(packet);
}

TEST_F(RtpVideoStreamReceiver2Test,
       DropsPacketWithRedPayloadTypeAndEmptyPayload) {
  const uint8_t kRedPayloadType = 125;
  config_.rtp.red_payload_type = kRedPayloadType;
  SetUp();  // Re-create rtp_video_stream_receiver with the red payload type.
  // clang-format off
  const uint8_t data[] = {
      0x80,              // RTP version.
      kRedPayloadType,   // Payload type.
      0, 0, 0, 0, 0, 0,  // Don't care.
      0, 0, 0x4, 0x57,   // SSRC
      // Empty rtp payload.
  };
  // clang-format on
  RtpPacketReceived packet;
  // Manually convert to CopyOnWriteBuffer to be sure capacity == size
  // and asan bot can catch read buffer overflow.
  EXPECT_TRUE(packet.Parse(rtc::CopyOnWriteBuffer(data)));
  rtp_video_stream_receiver_->StartReceive();
  rtp_video_stream_receiver_->OnRtpPacket(packet);
  // Expect asan doesn't find anything.
}

TEST_F(RtpVideoStreamReceiver2Test, GenericKeyFrameBitstreamError) {
  RtpPacketReceived rtp_packet;
  rtp_packet.SetPayloadType(kPayloadType);
  rtc::CopyOnWriteBuffer data({'1', '2', '3', '4'});
  rtp_packet.SetSequenceNumber(1);
  RTPVideoHeader video_header =
      GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
  constexpr uint8_t expected_bitstream[] = {1, 2, 3, 0xff};
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      expected_bitstream, sizeof(expected_bitstream));
  EXPECT_CALL(mock_on_complete_frame_callback_,
              DoOnCompleteFrameFailBitstream(_));
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);
}

class RtpVideoStreamReceiver2TestH264
    : public RtpVideoStreamReceiver2Test,
      public ::testing::WithParamInterface<std::string> {
 protected:
  RtpVideoStreamReceiver2TestH264() : RtpVideoStreamReceiver2Test(GetParam()) {}
};

INSTANTIATE_TEST_SUITE_P(SpsPpsIdrIsKeyframe,
                         RtpVideoStreamReceiver2TestH264,
                         Values("", "WebRTC-SpsPpsIdrIsH264Keyframe/Enabled/"));

TEST_P(RtpVideoStreamReceiver2TestH264, InBandSpsPps) {
  rtc::CopyOnWriteBuffer sps_data;
  RtpPacketReceived rtp_packet;
  RTPVideoHeader sps_video_header = GetDefaultH264VideoHeader();
  AddSps(&sps_video_header, 0, &sps_data);
  rtp_packet.SetSequenceNumber(0);
  rtp_packet.SetPayloadType(kPayloadType);
  sps_video_header.is_first_packet_in_frame = true;
  sps_video_header.frame_type = VideoFrameType::kEmptyFrame;
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(sps_data.data(),
                                                           sps_data.size());
  rtp_video_stream_receiver_->OnReceivedPayloadData(sps_data, rtp_packet,
                                                    sps_video_header);

  rtc::CopyOnWriteBuffer pps_data;
  RTPVideoHeader pps_video_header = GetDefaultH264VideoHeader();
  AddPps(&pps_video_header, 0, 1, &pps_data);
  rtp_packet.SetSequenceNumber(1);
  pps_video_header.is_first_packet_in_frame = true;
  pps_video_header.frame_type = VideoFrameType::kEmptyFrame;
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(pps_data.data(),
                                                           pps_data.size());
  rtp_video_stream_receiver_->OnReceivedPayloadData(pps_data, rtp_packet,
                                                    pps_video_header);

  rtc::CopyOnWriteBuffer idr_data;
  RTPVideoHeader idr_video_header = GetDefaultH264VideoHeader();
  AddIdr(&idr_video_header, 1);
  rtp_packet.SetSequenceNumber(2);
  idr_video_header.is_first_packet_in_frame = true;
  idr_video_header.is_last_packet_in_frame = true;
  idr_video_header.frame_type = VideoFrameType::kVideoFrameKey;
  const uint8_t idr[] = {0x65, 1, 2, 3};
  idr_data.AppendData(idr);
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(idr_data.data(),
                                                           idr_data.size());
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
  rtp_video_stream_receiver_->OnReceivedPayloadData(idr_data, rtp_packet,
                                                    idr_video_header);
}

TEST_P(RtpVideoStreamReceiver2TestH264, OutOfBandFmtpSpsPps) {
  constexpr int kPayloadType = 99;
  std::map<std::string, std::string> codec_params;
  // Example parameter sets from
  // https://tools.ietf.org/html/rfc3984#section-8.2.
  codec_params.insert(
      {cricket::kH264FmtpSpropParameterSets, "Z0IACpZTBYmI,aMljiA=="});
  rtp_video_stream_receiver_->AddReceiveCodec(kPayloadType, kVideoCodecH264,
                                              codec_params,
                                              /*raw_payload=*/false);
  const uint8_t binary_sps[] = {0x67, 0x42, 0x00, 0x0a, 0x96,
                                0x53, 0x05, 0x89, 0x88};
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(binary_sps,
                                                           sizeof(binary_sps));
  const uint8_t binary_pps[] = {0x68, 0xc9, 0x63, 0x88};
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(binary_pps,
                                                           sizeof(binary_pps));

  RtpPacketReceived rtp_packet;
  RTPVideoHeader video_header = GetDefaultH264VideoHeader();
  AddIdr(&video_header, 0);
  rtp_packet.SetPayloadType(kPayloadType);
  rtp_packet.SetSequenceNumber(2);
  video_header.is_first_packet_in_frame = true;
  video_header.is_last_packet_in_frame = true;
  video_header.codec = kVideoCodecH264;
  video_header.frame_type = VideoFrameType::kVideoFrameKey;
  rtc::CopyOnWriteBuffer data({'1', '2', '3'});
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
                                                           data.size());
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);
}

TEST_P(RtpVideoStreamReceiver2TestH264, ForceSpsPpsIdrIsKeyframe) {
  constexpr int kPayloadType = 99;
  std::map<std::string, std::string> codec_params;
  // Forcing can be done either with the field trial or with codec_params.
  if (GetParam().empty()) {
    codec_params.insert({cricket::kH264FmtpSpsPpsIdrInKeyframe, ""});
  }
  rtp_video_stream_receiver_->AddReceiveCodec(kPayloadType, kVideoCodecH264,
                                              codec_params,
                                              /*raw_payload=*/false);
  rtc::CopyOnWriteBuffer sps_data;
  RtpPacketReceived rtp_packet;
  RTPVideoHeader sps_video_header = GetDefaultH264VideoHeader();
  AddSps(&sps_video_header, 0, &sps_data);
  rtp_packet.SetSequenceNumber(0);
  rtp_packet.SetPayloadType(kPayloadType);
  sps_video_header.is_first_packet_in_frame = true;
  sps_video_header.frame_type = VideoFrameType::kEmptyFrame;
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(sps_data.data(),
                                                           sps_data.size());
  rtp_video_stream_receiver_->OnReceivedPayloadData(sps_data, rtp_packet,
                                                    sps_video_header);

  rtc::CopyOnWriteBuffer pps_data;
  RTPVideoHeader pps_video_header = GetDefaultH264VideoHeader();
  AddPps(&pps_video_header, 0, 1, &pps_data);
  rtp_packet.SetSequenceNumber(1);
  pps_video_header.is_first_packet_in_frame = true;
  pps_video_header.frame_type = VideoFrameType::kEmptyFrame;
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(pps_data.data(),
                                                           pps_data.size());
  rtp_video_stream_receiver_->OnReceivedPayloadData(pps_data, rtp_packet,
                                                    pps_video_header);

  rtc::CopyOnWriteBuffer idr_data;
  RTPVideoHeader idr_video_header = GetDefaultH264VideoHeader();
  AddIdr(&idr_video_header, 1);
  rtp_packet.SetSequenceNumber(2);
  idr_video_header.is_first_packet_in_frame = true;
  idr_video_header.is_last_packet_in_frame = true;
  idr_video_header.frame_type = VideoFrameType::kVideoFrameKey;
  const uint8_t idr[] = {0x65, 1, 2, 3};
  idr_data.AppendData(idr);
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(idr_data.data(),
                                                           idr_data.size());
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce(
          [&](EncodedFrame* frame) { EXPECT_TRUE(frame->is_keyframe()); });
  rtp_video_stream_receiver_->OnReceivedPayloadData(idr_data, rtp_packet,
                                                    idr_video_header);
  mock_on_complete_frame_callback_.ClearExpectedBitstream();
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(idr_data.data(),
                                                           idr_data.size());
  rtp_packet.SetSequenceNumber(3);
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce(
          [&](EncodedFrame* frame) { EXPECT_FALSE(frame->is_keyframe()); });
  rtp_video_stream_receiver_->OnReceivedPayloadData(idr_data, rtp_packet,
                                                    idr_video_header);
}

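// Padding packets sent within the media stream (empty payloads) must not stall
// frame delivery: they keep the packet sequence continuous so that frames
// before and after them can still be assembled.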
TEST_F(RtpVideoStreamReceiver2Test, PaddingInMediaStream) {
  RtpPacketReceived rtp_packet;
  RTPVideoHeader video_header = GetDefaultH264VideoHeader();
  rtc::CopyOnWriteBuffer data({'1', '2', '3'});
  rtp_packet.SetPayloadType(kPayloadType);
  rtp_packet.SetSequenceNumber(2);
  video_header.is_first_packet_in_frame = true;
  video_header.is_last_packet_in_frame = true;
  video_header.codec = kVideoCodecGeneric;
  video_header.frame_type = VideoFrameType::kVideoFrameKey;
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
                                                           data.size());

  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);

  rtp_packet.SetSequenceNumber(3);
  rtp_video_stream_receiver_->OnReceivedPayloadData({}, rtp_packet,
                                                    video_header);

  rtp_packet.SetSequenceNumber(4);
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
  video_header.frame_type = VideoFrameType::kVideoFrameDelta;
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);

  rtp_packet.SetSequenceNumber(6);
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);

  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
  rtp_packet.SetSequenceNumber(5);
  rtp_video_stream_receiver_->OnReceivedPayloadData({}, rtp_packet,
                                                    video_header);
}

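// A stream that starts with a delta frame cannot be decoded, so the receiver
// should request a key frame (PLI).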
TEST_F(RtpVideoStreamReceiver2Test, RequestKeyframeIfFirstFrameIsDelta) {
  RtpPacketReceived rtp_packet;
  rtp_packet.SetPayloadType(kPayloadType);
  rtc::CopyOnWriteBuffer data({'1', '2', '3', '4'});
  rtp_packet.SetSequenceNumber(1);
  RTPVideoHeader video_header =
      GetGenericVideoHeader(VideoFrameType::kVideoFrameDelta);

  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);
  EXPECT_THAT(rtcp_packet_parser_.pli()->num_packets(), Eq(1));
}

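// Overflowing the packet buffer with incomplete frames should also trigger a
// key frame request (PLI).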
TEST_F(RtpVideoStreamReceiver2Test, RequestKeyframeWhenPacketBufferGetsFull) {
  constexpr int kPacketBufferMaxSize = 2048;

  RtpPacketReceived rtp_packet;
  rtp_packet.SetPayloadType(kPayloadType);
  rtc::CopyOnWriteBuffer data({'1', '2', '3', '4'});
  RTPVideoHeader video_header =
      GetGenericVideoHeader(VideoFrameType::kVideoFrameDelta);
  // Incomplete frames so that the packet buffer is filling up.
  video_header.is_last_packet_in_frame = false;
  uint16_t start_sequence_number = 1234;
  rtp_packet.SetSequenceNumber(start_sequence_number);
  while (rtp_packet.SequenceNumber() - start_sequence_number <
         kPacketBufferMaxSize) {
    rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                      video_header);
    rtp_packet.SetSequenceNumber(rtp_packet.SequenceNumber() + 2);
  }

  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);
  EXPECT_THAT(rtcp_packet_parser_.pli()->num_packets(), Eq(1));
}

TEST_F(RtpVideoStreamReceiver2Test, SinkGetsRtpNotifications) {
  rtp_video_stream_receiver_->StartReceive();

  MockRtpPacketSink test_sink;
  test_packet_sink_ = &test_sink;

  auto rtp_packet = CreateRtpPacketReceived();
  EXPECT_CALL(test_sink, OnRtpPacket(SamePacketAs(*rtp_packet)));

  rtp_video_stream_receiver_->OnRtpPacket(*rtp_packet);

  // Test tear-down.
  rtp_video_stream_receiver_->StopReceive();
  test_packet_sink_ = nullptr;
}

TEST_F(RtpVideoStreamReceiver2Test, NonStartedStreamGetsNoRtpCallbacks) {
  // Explicitly showing that the stream is not in the `started` state,
  // regardless of whether streams start out `started` or `stopped`.
  rtp_video_stream_receiver_->StopReceive();

  MockRtpPacketSink test_sink;
  test_packet_sink_ = &test_sink;

  auto rtp_packet = CreateRtpPacketReceived();
  EXPECT_CALL(test_sink, OnRtpPacket(_)).Times(0);

  rtp_video_stream_receiver_->OnRtpPacket(*rtp_packet);

  test_packet_sink_ = nullptr;
}

TEST_F(RtpVideoStreamReceiver2Test, ParseGenericDescriptorOnePacket) {
  const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
  const int kSpatialIndex = 1;

  rtp_video_stream_receiver_->StartReceive();

  RtpHeaderExtensionMap extension_map;
  extension_map.Register<RtpGenericFrameDescriptorExtension00>(5);
  RtpPacketReceived rtp_packet(&extension_map);
  rtp_packet.SetPayloadType(kPayloadType);

  RtpGenericFrameDescriptor generic_descriptor;
  generic_descriptor.SetFirstPacketInSubFrame(true);
  generic_descriptor.SetLastPacketInSubFrame(true);
  generic_descriptor.SetFrameId(100);
  generic_descriptor.SetSpatialLayersBitmask(1 << kSpatialIndex);
  generic_descriptor.AddFrameDependencyDiff(90);
  generic_descriptor.AddFrameDependencyDiff(80);
  ASSERT_TRUE(rtp_packet.SetExtension<RtpGenericFrameDescriptorExtension00>(
      generic_descriptor));

  uint8_t* payload = rtp_packet.SetPayloadSize(data.size());
  memcpy(payload, data.data(), data.size());
  // The first byte is the header, so we ignore the first byte of `data`.
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data() + 1,
                                                           data.size() - 1);

  rtp_packet.SetMarker(true);
  rtp_packet.SetPayloadType(kPayloadType);
  rtp_packet.SetSequenceNumber(1);

  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce(Invoke([kSpatialIndex](EncodedFrame* frame) {
        EXPECT_EQ(frame->num_references, 2U);
        EXPECT_EQ(frame->references[0], frame->Id() - 90);
        EXPECT_EQ(frame->references[1], frame->Id() - 80);
        EXPECT_EQ(frame->SpatialIndex(), kSpatialIndex);
        EXPECT_THAT(frame->PacketInfos(), SizeIs(1));
      }));

  rtp_video_stream_receiver_->OnRtpPacket(rtp_packet);
}

TEST_F(RtpVideoStreamReceiver2Test, ParseGenericDescriptorTwoPackets) {
  const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
  const int kSpatialIndex = 1;

  rtp_video_stream_receiver_->StartReceive();

  RtpHeaderExtensionMap extension_map;
  extension_map.Register<RtpGenericFrameDescriptorExtension00>(5);
  RtpPacketReceived first_packet(&extension_map);

  RtpGenericFrameDescriptor first_packet_descriptor;
  first_packet_descriptor.SetFirstPacketInSubFrame(true);
  first_packet_descriptor.SetLastPacketInSubFrame(false);
  first_packet_descriptor.SetFrameId(100);
  first_packet_descriptor.SetSpatialLayersBitmask(1 << kSpatialIndex);
  first_packet_descriptor.SetResolution(480, 360);
  ASSERT_TRUE(first_packet.SetExtension<RtpGenericFrameDescriptorExtension00>(
      first_packet_descriptor));

  uint8_t* first_packet_payload = first_packet.SetPayloadSize(data.size());
  memcpy(first_packet_payload, data.data(), data.size());
  // The first byte is the header, so we ignore the first byte of `data`.
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data() + 1,
                                                           data.size() - 1);

  first_packet.SetPayloadType(kPayloadType);
  first_packet.SetSequenceNumber(1);
  rtp_video_stream_receiver_->OnRtpPacket(first_packet);

  RtpPacketReceived second_packet(&extension_map);
  RtpGenericFrameDescriptor second_packet_descriptor;
  second_packet_descriptor.SetFirstPacketInSubFrame(false);
  second_packet_descriptor.SetLastPacketInSubFrame(true);
  ASSERT_TRUE(second_packet.SetExtension<RtpGenericFrameDescriptorExtension00>(
      second_packet_descriptor));

  second_packet.SetMarker(true);
  second_packet.SetPayloadType(kPayloadType);
  second_packet.SetSequenceNumber(2);

  uint8_t* second_packet_payload = second_packet.SetPayloadSize(data.size());
  memcpy(second_packet_payload, data.data(), data.size());
  // The first byte is the header, so we ignore the first byte of `data`.
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data() + 1,
                                                           data.size() - 1);

  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce(Invoke([kSpatialIndex](EncodedFrame* frame) {
        EXPECT_EQ(frame->num_references, 0U);
        EXPECT_EQ(frame->SpatialIndex(), kSpatialIndex);
        EXPECT_EQ(frame->EncodedImage()._encodedWidth, 480u);
        EXPECT_EQ(frame->EncodedImage()._encodedHeight, 360u);
        EXPECT_THAT(frame->PacketInfos(), SizeIs(2));
      }));

  rtp_video_stream_receiver_->OnRtpPacket(second_packet);
}

TEST_F(RtpVideoStreamReceiver2Test, ParseGenericDescriptorRawPayload) {
  const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
  const int kRawPayloadType = 123;

  rtp_video_stream_receiver_->AddReceiveCodec(kRawPayloadType,
                                              kVideoCodecGeneric, {},
                                              /*raw_payload=*/true);
  rtp_video_stream_receiver_->StartReceive();

  RtpHeaderExtensionMap extension_map;
  extension_map.Register<RtpGenericFrameDescriptorExtension00>(5);
  RtpPacketReceived rtp_packet(&extension_map);

  RtpGenericFrameDescriptor generic_descriptor;
  generic_descriptor.SetFirstPacketInSubFrame(true);
  generic_descriptor.SetLastPacketInSubFrame(true);
  ASSERT_TRUE(rtp_packet.SetExtension<RtpGenericFrameDescriptorExtension00>(
      generic_descriptor));

  uint8_t* payload = rtp_packet.SetPayloadSize(data.size());
  memcpy(payload, data.data(), data.size());
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
                                                           data.size());

  rtp_packet.SetMarker(true);
  rtp_packet.SetPayloadType(kRawPayloadType);
  rtp_packet.SetSequenceNumber(1);

  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame);
  rtp_video_stream_receiver_->OnRtpPacket(rtp_packet);
}

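// The 16-bit frame id in the generic frame descriptor must be unwrapped into a
// monotonically increasing frame id across the 0xffff -> 0x0000 wrap.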
TEST_F(RtpVideoStreamReceiver2Test, UnwrapsFrameId) {
  const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
  const int kPayloadType = 123;

  rtp_video_stream_receiver_->AddReceiveCodec(kPayloadType, kVideoCodecGeneric,
                                              {},
                                              /*raw_payload=*/true);
  rtp_video_stream_receiver_->StartReceive();
  RtpHeaderExtensionMap extension_map;
  extension_map.Register<RtpGenericFrameDescriptorExtension00>(5);

  uint16_t rtp_sequence_number = 1;
  auto inject_packet = [&](uint16_t wrapped_frame_id) {
    RtpPacketReceived rtp_packet(&extension_map);

    RtpGenericFrameDescriptor generic_descriptor;
    generic_descriptor.SetFirstPacketInSubFrame(true);
    generic_descriptor.SetLastPacketInSubFrame(true);
    generic_descriptor.SetFrameId(wrapped_frame_id);
    ASSERT_TRUE(rtp_packet.SetExtension<RtpGenericFrameDescriptorExtension00>(
        generic_descriptor));

    uint8_t* payload = rtp_packet.SetPayloadSize(data.size());
    ASSERT_TRUE(payload);
    memcpy(payload, data.data(), data.size());
    mock_on_complete_frame_callback_.ClearExpectedBitstream();
    mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
                                                             data.size());
    rtp_packet.SetMarker(true);
    rtp_packet.SetPayloadType(kPayloadType);
    rtp_packet.SetSequenceNumber(++rtp_sequence_number);
    rtp_video_stream_receiver_->OnRtpPacket(rtp_packet);
  };

  int64_t first_picture_id;
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce([&](EncodedFrame* frame) { first_picture_id = frame->Id(); });
  inject_packet(/*wrapped_frame_id=*/0xffff);

  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce([&](EncodedFrame* frame) {
        EXPECT_EQ(frame->Id() - first_picture_id, 3);
      });
  inject_packet(/*wrapped_frame_id=*/0x0002);
}

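// Fixture for tests that feed packets carrying the DependencyDescriptor RTP
// header extension into the receiver.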
class RtpVideoStreamReceiver2DependencyDescriptorTest
    : public RtpVideoStreamReceiver2Test {
 public:
  RtpVideoStreamReceiver2DependencyDescriptorTest() {
    rtp_video_stream_receiver_->AddReceiveCodec(payload_type_,
                                                kVideoCodecGeneric, {},
                                                /*raw_payload=*/true);
    extension_map_.Register<RtpDependencyDescriptorExtension>(7);
    rtp_video_stream_receiver_->StartReceive();
  }

  // Returns some valid structure for the DependencyDescriptors.
  // The first template of that structure always fits a key frame.
  static FrameDependencyStructure CreateStreamStructure() {
    FrameDependencyStructure stream_structure;
    stream_structure.num_decode_targets = 1;
    stream_structure.templates = {
        FrameDependencyTemplate().Dtis("S"),
        FrameDependencyTemplate().Dtis("S").FrameDiffs({1}),
    };
    return stream_structure;
  }

  void InjectPacketWith(const FrameDependencyStructure& stream_structure,
                        const DependencyDescriptor& dependency_descriptor) {
    const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
    RtpPacketReceived rtp_packet(&extension_map_);
    ASSERT_TRUE(rtp_packet.SetExtension<RtpDependencyDescriptorExtension>(
        stream_structure, dependency_descriptor));
    uint8_t* payload = rtp_packet.SetPayloadSize(data.size());
    ASSERT_TRUE(payload);
    memcpy(payload, data.data(), data.size());
    mock_on_complete_frame_callback_.ClearExpectedBitstream();
    mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
                                                             data.size());
    rtp_packet.SetMarker(true);
    rtp_packet.SetPayloadType(payload_type_);
    rtp_packet.SetSequenceNumber(++rtp_sequence_number_);
    rtp_video_stream_receiver_->OnRtpPacket(rtp_packet);
  }

 private:
  const int payload_type_ = 123;
  RtpHeaderExtensionMap extension_map_;
  uint16_t rtp_sequence_number_ = 321;
};

TEST_F(RtpVideoStreamReceiver2DependencyDescriptorTest, UnwrapsFrameId) {
  FrameDependencyStructure stream_structure = CreateStreamStructure();

  DependencyDescriptor keyframe_descriptor;
  keyframe_descriptor.attached_structure =
      std::make_unique<FrameDependencyStructure>(stream_structure);
  keyframe_descriptor.frame_dependencies = stream_structure.templates[0];
  keyframe_descriptor.frame_number = 0xfff0;
  // DependencyDescriptor doesn't support reordering delta frames before the
  // keyframe. Thus feed a key frame first, then test reordered delta frames.
  int64_t first_picture_id;
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce([&](EncodedFrame* frame) { first_picture_id = frame->Id(); });
  InjectPacketWith(stream_structure, keyframe_descriptor);

  DependencyDescriptor deltaframe1_descriptor;
  deltaframe1_descriptor.frame_dependencies = stream_structure.templates[1];
  deltaframe1_descriptor.frame_number = 0xfffe;

  DependencyDescriptor deltaframe2_descriptor;
  deltaframe2_descriptor.frame_dependencies = stream_structure.templates[1];
  deltaframe2_descriptor.frame_number = 0x0002;

  // Parser should unwrap frame ids correctly even if packets were reordered by
  // the network.
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce([&](EncodedFrame* frame) {
        // 0x0002 - 0xfff0
        EXPECT_EQ(frame->Id() - first_picture_id, 18);
      })
      .WillOnce([&](EncodedFrame* frame) {
        // 0xfffe - 0xfff0
        EXPECT_EQ(frame->Id() - first_picture_id, 14);
      });
  InjectPacketWith(stream_structure, deltaframe2_descriptor);
  InjectPacketWith(stream_structure, deltaframe1_descriptor);
}

TEST_F(RtpVideoStreamReceiver2DependencyDescriptorTest,
       DropsLateDeltaFramePacketWithDependencyDescriptorExtension) {
  FrameDependencyStructure stream_structure1 = CreateStreamStructure();
  FrameDependencyStructure stream_structure2 = CreateStreamStructure();
  // Make sure template ids for these two structures do not collide:
  // adjust structure_id (that is also used as template id offset).
  stream_structure1.structure_id = 13;
  stream_structure2.structure_id =
      stream_structure1.structure_id + stream_structure1.templates.size();

  DependencyDescriptor keyframe1_descriptor;
  keyframe1_descriptor.attached_structure =
      std::make_unique<FrameDependencyStructure>(stream_structure1);
  keyframe1_descriptor.frame_dependencies = stream_structure1.templates[0];
  keyframe1_descriptor.frame_number = 1;
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame);
  InjectPacketWith(stream_structure1, keyframe1_descriptor);

  // Pass in 2nd key frame with different structure.
  DependencyDescriptor keyframe2_descriptor;
  keyframe2_descriptor.attached_structure =
      std::make_unique<FrameDependencyStructure>(stream_structure2);
  keyframe2_descriptor.frame_dependencies = stream_structure2.templates[0];
  keyframe2_descriptor.frame_number = 3;
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame);
  InjectPacketWith(stream_structure2, keyframe2_descriptor);

  // Pass in late delta frame that uses structure of the 1st key frame.
  DependencyDescriptor deltaframe_descriptor;
  deltaframe_descriptor.frame_dependencies = stream_structure1.templates[0];
  deltaframe_descriptor.frame_number = 2;
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame).Times(0);
  InjectPacketWith(stream_structure1, deltaframe_descriptor);
}

TEST_F(RtpVideoStreamReceiver2DependencyDescriptorTest,
       DropsLateKeyFramePacketWithDependencyDescriptorExtension) {
  FrameDependencyStructure stream_structure1 = CreateStreamStructure();
  FrameDependencyStructure stream_structure2 = CreateStreamStructure();
  // Make sure template ids for these two structures do not collide:
  // adjust structure_id (that is also used as template id offset).
  stream_structure1.structure_id = 13;
  stream_structure2.structure_id =
      stream_structure1.structure_id + stream_structure1.templates.size();

  DependencyDescriptor keyframe1_descriptor;
  keyframe1_descriptor.attached_structure =
      std::make_unique<FrameDependencyStructure>(stream_structure1);
  keyframe1_descriptor.frame_dependencies = stream_structure1.templates[0];
  keyframe1_descriptor.frame_number = 1;

  DependencyDescriptor keyframe2_descriptor;
  keyframe2_descriptor.attached_structure =
      std::make_unique<FrameDependencyStructure>(stream_structure2);
  keyframe2_descriptor.frame_dependencies = stream_structure2.templates[0];
  keyframe2_descriptor.frame_number = 3;

  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce(
          [&](EncodedFrame* frame) { EXPECT_EQ(frame->Id() & 0xFFFF, 3); });
  InjectPacketWith(stream_structure2, keyframe2_descriptor);
  InjectPacketWith(stream_structure1, keyframe1_descriptor);

  // Pass in delta frame that uses structure of the 2nd key frame. Late key
  // frame shouldn't block it.
  DependencyDescriptor deltaframe_descriptor;
  deltaframe_descriptor.frame_dependencies = stream_structure2.templates[0];
  deltaframe_descriptor.frame_number = 4;
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce(
          [&](EncodedFrame* frame) { EXPECT_EQ(frame->Id() & 0xFFFF, 4); });
  InjectPacketWith(stream_structure2, deltaframe_descriptor);
}

TEST_F(RtpVideoStreamReceiver2DependencyDescriptorTest,
       RequestKeyframeIfInitialKeyframePacketIsLost) {
  FrameDependencyStructure stream_structure = CreateStreamStructure();

  DependencyDescriptor keyframe_descriptor_without_structure;
  keyframe_descriptor_without_structure.frame_dependencies =
      stream_structure.templates[0];
  keyframe_descriptor_without_structure.frame_number = 0;

  InjectPacketWith(stream_structure, keyframe_descriptor_without_structure);

  // Not enough time has passed since the last keyframe request.
  time_controller_.AdvanceTime(TimeDelta::Millis(500));
  InjectPacketWith(stream_structure, keyframe_descriptor_without_structure);
  EXPECT_THAT(rtcp_packet_parser_.pli()->num_packets(), Eq(1));

  time_controller_.AdvanceTime(TimeDelta::Millis(501));
  InjectPacketWith(stream_structure, keyframe_descriptor_without_structure);
  EXPECT_THAT(rtcp_packet_parser_.pli()->num_packets(), Eq(2));
}

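// When a FrameTransformerInterface is provided, complete frames are routed
// through Transform() and the sink callback is registered/unregistered for the
// remote SSRC.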
TEST_F(RtpVideoStreamReceiver2Test, TransformFrame) {
  rtc::scoped_refptr<MockFrameTransformer> mock_frame_transformer =
      rtc::make_ref_counted<testing::NiceMock<MockFrameTransformer>>();
  EXPECT_CALL(*mock_frame_transformer,
              RegisterTransformedFrameSinkCallback(_, config_.rtp.remote_ssrc));
  auto receiver = std::make_unique<RtpVideoStreamReceiver2>(
      TaskQueueBase::Current(), Clock::GetRealTimeClock(), &mock_transport_,
      nullptr, nullptr, &config_, rtp_receive_statistics_.get(), nullptr,
      nullptr, &nack_periodic_processor_, &mock_on_complete_frame_callback_,
      nullptr, mock_frame_transformer, field_trials_, nullptr);
  receiver->AddReceiveCodec(kPayloadType, kVideoCodecGeneric, {},
                            /*raw_payload=*/false);

  RtpPacketReceived rtp_packet;
  rtp_packet.SetPayloadType(kPayloadType);
  rtc::CopyOnWriteBuffer data({'1', '2', '3', '4'});
  rtp_packet.SetSequenceNumber(1);
  RTPVideoHeader video_header =
      GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
                                                           data.size());
  EXPECT_CALL(*mock_frame_transformer, Transform(_));
  receiver->OnReceivedPayloadData(data, rtp_packet, video_header);

  EXPECT_CALL(*mock_frame_transformer,
              UnregisterTransformedFrameSinkCallback(config_.rtp.remote_ssrc));
  receiver = nullptr;
}

// Test the default behavior and the behavior when the playout delay is
// overridden by a field trial.
const VideoPlayoutDelay kTransmittedPlayoutDelay = {100, 200};
const VideoPlayoutDelay kForcedPlayoutDelay = {70, 90};
struct PlayoutDelayOptions {
  std::string field_trial;
  VideoPlayoutDelay expected_delay;
};
const PlayoutDelayOptions kDefaultBehavior = {
    /*field_trial=*/"", /*expected_delay=*/kTransmittedPlayoutDelay};
const PlayoutDelayOptions kOverridePlayoutDelay = {
    /*field_trial=*/"WebRTC-ForcePlayoutDelay/min_ms:70,max_ms:90/",
    /*expected_delay=*/kForcedPlayoutDelay};

class RtpVideoStreamReceiver2TestPlayoutDelay
    : public RtpVideoStreamReceiver2Test,
      public ::testing::WithParamInterface<PlayoutDelayOptions> {
 protected:
  RtpVideoStreamReceiver2TestPlayoutDelay()
      : RtpVideoStreamReceiver2Test(GetParam().field_trial) {}
};

INSTANTIATE_TEST_SUITE_P(PlayoutDelay,
                         RtpVideoStreamReceiver2TestPlayoutDelay,
                         Values(kDefaultBehavior, kOverridePlayoutDelay));

TEST_P(RtpVideoStreamReceiver2TestPlayoutDelay, PlayoutDelay) {
  rtc::CopyOnWriteBuffer payload_data({'1', '2', '3', '4'});
  RtpHeaderExtensionMap extension_map;
  extension_map.Register<PlayoutDelayLimits>(1);
  RtpPacketToSend packet_to_send(&extension_map);
  packet_to_send.SetPayloadType(kPayloadType);
  packet_to_send.SetSequenceNumber(1);

  // Set playout delay on outgoing packet.
  EXPECT_TRUE(packet_to_send.SetExtension<PlayoutDelayLimits>(
      kTransmittedPlayoutDelay));
  uint8_t* payload = packet_to_send.AllocatePayload(payload_data.size());
  memcpy(payload, payload_data.data(), payload_data.size());

  RtpPacketReceived received_packet(&extension_map);
  received_packet.Parse(packet_to_send.data(), packet_to_send.size());

  RTPVideoHeader video_header =
      GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
  mock_on_complete_frame_callback_.AppendExpectedBitstream(payload_data.data(),
                                                           payload_data.size());
  // Expect the playout delay of the encoded frame to be the same as the
  // transmitted playout delay unless it was overridden by a field trial.
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_))
      .WillOnce(Invoke([expected_playout_delay =
                            GetParam().expected_delay](EncodedFrame* frame) {
        EXPECT_EQ(frame->EncodedImage().playout_delay_, expected_playout_delay);
      }));
  rtp_video_stream_receiver_->OnReceivedPayloadData(
      received_packet.PayloadBuffer(), received_packet, video_header);
}

}  // namespace webrtc