/*
 *  Copyright 2022 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/desktop_capture/linux/wayland/shared_screencast_stream.h"

#include <fcntl.h>
#include <libdrm/drm_fourcc.h>
#include <pipewire/pipewire.h>
#include <spa/param/video/format-utils.h>
#include <sys/mman.h>

#include <algorithm>
#include <cerrno>
#include <cstring>
#include <memory>
#include <utility>
#include <vector>

#include "absl/memory/memory.h"
#include "modules/desktop_capture/linux/wayland/egl_dmabuf.h"
#include "modules/desktop_capture/linux/wayland/screencast_stream_utils.h"
#include "modules/portal/pipewire_utils.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/sanitizer.h"
#include "rtc_base/synchronization/mutex.h"

namespace webrtc {

const int kBytesPerPixel = 4;
const int kVideoDamageRegionCount = 16;

constexpr int kCursorBpp = 4;
constexpr int CursorMetaSize(int w, int h) {
  return (sizeof(struct spa_meta_cursor) + sizeof(struct spa_meta_bitmap) +
          w * h * kCursorBpp);
}
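// Illustrative sizing (not used by the code): for the default 64x64 cursor
// offered further below, the bitmap payload alone is 64 * 64 * kCursorBpp =
// 16384 bytes, on top of the two fixed-size SPA structs accounted for above.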

constexpr PipeWireVersion kDmaBufMinVersion = {0, 3, 24};
constexpr PipeWireVersion kDmaBufModifierMinVersion = {0, 3, 33};
constexpr PipeWireVersion kDropSingleModifierMinVersion = {0, 3, 40};
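// Summary of how these minimum versions are checked in this file:
// kDmaBufMinVersion gates offering SPA_DATA_DmaBuf as a buffer type,
// kDmaBufModifierMinVersion gates DRM modifier negotiation via
// QueryDmaBufModifiers(), and kDropSingleModifierMinVersion lets us drop only
// the failing modifier on renegotiation instead of clearing the whole list.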

class ScopedBuf {
 public:
  ScopedBuf() {}
  ScopedBuf(uint8_t* map, int map_size, int fd)
      : map_(map), map_size_(map_size), fd_(fd) {}
  ~ScopedBuf() {
    if (map_ != MAP_FAILED) {
      munmap(map_, map_size_);
    }
  }

  explicit operator bool() { return map_ != MAP_FAILED; }

  void initialize(uint8_t* map, int map_size, int fd) {
    map_ = map;
    map_size_ = map_size;
    fd_ = fd;
  }

  uint8_t* get() { return map_; }

 protected:
  uint8_t* map_ = static_cast<uint8_t*>(MAP_FAILED);
  int map_size_;
  int fd_;
};

class SharedScreenCastStreamPrivate {
 public:
  SharedScreenCastStreamPrivate();
  ~SharedScreenCastStreamPrivate();

  bool StartScreenCastStream(uint32_t stream_node_id,
                             int fd,
                             uint32_t width = 0,
                             uint32_t height = 0,
                             bool is_cursor_embedded = false);
  void UpdateScreenCastStreamResolution(uint32_t width, uint32_t height);
  void SetUseDamageRegion(bool use_damage_region) {
    use_damage_region_ = use_damage_region;
  }
  void SetObserver(SharedScreenCastStream::Observer* observer) {
    observer_ = observer;
  }
  void StopScreenCastStream();
  std::unique_ptr<SharedDesktopFrame> CaptureFrame();
  std::unique_ptr<MouseCursor> CaptureCursor();
  DesktopVector CaptureCursorPosition();

 private:
  // Stops the streams and cleans up any in-use elements.
  void StopAndCleanupStream();

  SharedScreenCastStream::Observer* observer_ = nullptr;

  // Tracks the damage region updates that were reported since the last time a
  // frame was captured.
  DesktopRegion damage_region_;

  uint32_t pw_stream_node_id_ = 0;

  DesktopSize stream_size_ = {};
  DesktopSize frame_size_;

  webrtc::Mutex queue_lock_;
  ScreenCaptureFrameQueue<SharedDesktopFrame> queue_
      RTC_GUARDED_BY(&queue_lock_);
  std::unique_ptr<MouseCursor> mouse_cursor_;
  DesktopVector mouse_cursor_position_ = DesktopVector(-1, -1);

  int64_t modifier_;
  std::unique_ptr<EglDmaBuf> egl_dmabuf_;
  // List of modifiers we query as supported by the graphics card/driver.
  std::vector<uint64_t> modifiers_;

  // PipeWire types
  struct pw_context* pw_context_ = nullptr;
  struct pw_core* pw_core_ = nullptr;
  struct pw_stream* pw_stream_ = nullptr;
  struct pw_thread_loop* pw_main_loop_ = nullptr;
  struct spa_source* renegotiate_ = nullptr;

  spa_hook spa_core_listener_;
  spa_hook spa_stream_listener_;

  // A number used to verify that all previous methods and the resulting
  // events have been handled.
  int server_version_sync_ = 0;
  // Version of the running PipeWire server we communicate with.
  PipeWireVersion pw_server_version_;
  // Version of the library used to run our code.
  PipeWireVersion pw_client_version_;

  // Resolution parameters.
  uint32_t width_ = 0;
  uint32_t height_ = 0;
  webrtc::Mutex resolution_lock_;
  // Resolution changes are processed during buffer processing.
  bool pending_resolution_change_ RTC_GUARDED_BY(&resolution_lock_) = false;

  bool use_damage_region_ = true;

  // Specifies whether the PipeWire stream has been initialized with a request
  // to embed the cursor into the captured frames.
  bool is_cursor_embedded_ = false;

  // Event handlers.
  pw_core_events pw_core_events_ = {};
  pw_stream_events pw_stream_events_ = {};

  struct spa_video_info_raw spa_video_format_;

  void ProcessBuffer(pw_buffer* buffer);
  void ConvertRGBxToBGRx(uint8_t* frame, uint32_t size);

  // PipeWire callbacks.
  static void OnCoreError(void* data,
                          uint32_t id,
                          int seq,
                          int res,
                          const char* message);
  static void OnCoreDone(void* user_data, uint32_t id, int seq);
  static void OnCoreInfo(void* user_data, const pw_core_info* info);
  static void OnStreamParamChanged(void* data,
                                   uint32_t id,
                                   const struct spa_pod* format);
  static void OnStreamStateChanged(void* data,
                                   pw_stream_state old_state,
                                   pw_stream_state state,
                                   const char* error_message);
  static void OnStreamProcess(void* data);
  // This will be invoked in case we fail to process a DMA-BUF PipeWire buffer
  // using the negotiated stream parameters (modifier). We will drop the
  // modifier we failed to use and try a different one, or fall back to shared
  // memory buffers.
  static void OnRenegotiateFormat(void* data, uint64_t);
};
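
// Typical call sequence for this class (a sketch for orientation, not code
// that runs here): the capturer calls StartScreenCastStream() with a PipeWire
// node id (in practice obtained from an xdg-desktop-portal ScreenCast
// session), then polls CaptureFrame()/CaptureCursor()/CaptureCursorPosition()
// while frames arrive on the PipeWire thread, and finally calls
// StopScreenCastStream(). The public SharedScreenCastStream wrappers at the
// bottom of this file forward to these methods one to one.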

void SharedScreenCastStreamPrivate::OnCoreError(void* data,
                                                uint32_t id,
                                                int seq,
                                                int res,
                                                const char* message) {
  SharedScreenCastStreamPrivate* stream =
      static_cast<SharedScreenCastStreamPrivate*>(data);
  RTC_DCHECK(stream);

  RTC_LOG(LS_ERROR) << "PipeWire remote error: " << message;
  pw_thread_loop_signal(stream->pw_main_loop_, false);
}

void SharedScreenCastStreamPrivate::OnCoreInfo(void* data,
                                               const pw_core_info* info) {
  SharedScreenCastStreamPrivate* stream =
      static_cast<SharedScreenCastStreamPrivate*>(data);
  RTC_DCHECK(stream);

  stream->pw_server_version_ = PipeWireVersion::Parse(info->version);
}

void SharedScreenCastStreamPrivate::OnCoreDone(void* data,
                                               uint32_t id,
                                               int seq) {
  const SharedScreenCastStreamPrivate* stream =
      static_cast<SharedScreenCastStreamPrivate*>(data);
  RTC_DCHECK(stream);

  if (id == PW_ID_CORE && stream->server_version_sync_ == seq) {
    pw_thread_loop_signal(stream->pw_main_loop_, false);
  }
}

// static
void SharedScreenCastStreamPrivate::OnStreamStateChanged(
    void* data,
    pw_stream_state old_state,
    pw_stream_state state,
    const char* error_message) {
  SharedScreenCastStreamPrivate* that =
      static_cast<SharedScreenCastStreamPrivate*>(data);
  RTC_DCHECK(that);

  switch (state) {
    case PW_STREAM_STATE_ERROR:
      RTC_LOG(LS_ERROR) << "PipeWire stream state error: " << error_message;
      break;
    case PW_STREAM_STATE_PAUSED:
      if (that->observer_ && old_state != PW_STREAM_STATE_STREAMING) {
        that->observer_->OnStreamConfigured();
      }
      break;
    case PW_STREAM_STATE_STREAMING:
    case PW_STREAM_STATE_UNCONNECTED:
    case PW_STREAM_STATE_CONNECTING:
      break;
  }
}

// static
void SharedScreenCastStreamPrivate::OnStreamParamChanged(
    void* data,
    uint32_t id,
    const struct spa_pod* format) {
  SharedScreenCastStreamPrivate* that =
      static_cast<SharedScreenCastStreamPrivate*>(data);
  RTC_DCHECK(that);

  RTC_LOG(LS_INFO) << "PipeWire stream format changed.";
  if (!format || id != SPA_PARAM_Format) {
    return;
  }

  spa_format_video_raw_parse(format, &that->spa_video_format_);

  auto width = that->spa_video_format_.size.width;
  auto height = that->spa_video_format_.size.height;
  auto stride = SPA_ROUND_UP_N(width * kBytesPerPixel, 4);
  auto size = height * stride;

  that->stream_size_ = DesktopSize(width, height);

  uint8_t buffer[1024] = {};
  auto builder = spa_pod_builder{buffer, sizeof(buffer)};

  // Set up buffers and meta header for the new format.

  // When SPA_FORMAT_VIDEO_modifier is present we can use DMA-BUFs as
  // the server announces support for it.
  // See https://github.com/PipeWire/pipewire/blob/master/doc/dma-buf.dox
  const bool has_modifier =
      spa_pod_find_prop(format, nullptr, SPA_FORMAT_VIDEO_modifier);
  that->modifier_ =
      has_modifier ? that->spa_video_format_.modifier : DRM_FORMAT_MOD_INVALID;
  std::vector<const spa_pod*> params;
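  // The buffer data types we can consume are expressed as a bitmask of
  // 1 << SPA_DATA_*: DMA-BUF is only offered when the format already carries
  // a modifier or the server is new enough (kDmaBufMinVersion); shared memory
  // (MemFd/MemPtr) is always accepted as a fallback.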
  const int buffer_types =
      has_modifier || (that->pw_server_version_ >= kDmaBufMinVersion)
          ? (1 << SPA_DATA_DmaBuf) | (1 << SPA_DATA_MemFd) |
                (1 << SPA_DATA_MemPtr)
          : (1 << SPA_DATA_MemFd) | (1 << SPA_DATA_MemPtr);

  params.push_back(reinterpret_cast<spa_pod*>(spa_pod_builder_add_object(
      &builder, SPA_TYPE_OBJECT_ParamBuffers, SPA_PARAM_Buffers,
      SPA_PARAM_BUFFERS_size, SPA_POD_Int(size), SPA_PARAM_BUFFERS_stride,
      SPA_POD_Int(stride), SPA_PARAM_BUFFERS_buffers,
      SPA_POD_CHOICE_RANGE_Int(8, 1, 32), SPA_PARAM_BUFFERS_dataType,
      SPA_POD_CHOICE_FLAGS_Int(buffer_types))));
  params.push_back(reinterpret_cast<spa_pod*>(spa_pod_builder_add_object(
      &builder, SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta, SPA_PARAM_META_type,
      SPA_POD_Id(SPA_META_Header), SPA_PARAM_META_size,
      SPA_POD_Int(sizeof(struct spa_meta_header)))));
  params.push_back(reinterpret_cast<spa_pod*>(spa_pod_builder_add_object(
      &builder, SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta, SPA_PARAM_META_type,
      SPA_POD_Id(SPA_META_VideoCrop), SPA_PARAM_META_size,
      SPA_POD_Int(sizeof(struct spa_meta_region)))));
  params.push_back(reinterpret_cast<spa_pod*>(spa_pod_builder_add_object(
      &builder, SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta, SPA_PARAM_META_type,
      SPA_POD_Id(SPA_META_Cursor), SPA_PARAM_META_size,
      SPA_POD_CHOICE_RANGE_Int(CursorMetaSize(64, 64), CursorMetaSize(1, 1),
                               CursorMetaSize(384, 384)))));
  params.push_back(reinterpret_cast<spa_pod*>(spa_pod_builder_add_object(
      &builder, SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta, SPA_PARAM_META_type,
      SPA_POD_Id(SPA_META_VideoDamage), SPA_PARAM_META_size,
      SPA_POD_CHOICE_RANGE_Int(
          sizeof(struct spa_meta_region) * kVideoDamageRegionCount,
          sizeof(struct spa_meta_region) * 1,
          sizeof(struct spa_meta_region) * kVideoDamageRegionCount))));

  pw_stream_update_params(that->pw_stream_, params.data(), params.size());
}

// static
void SharedScreenCastStreamPrivate::OnStreamProcess(void* data) {
  SharedScreenCastStreamPrivate* that =
      static_cast<SharedScreenCastStreamPrivate*>(data);
  RTC_DCHECK(that);

  struct pw_buffer* next_buffer;
  struct pw_buffer* buffer = nullptr;

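  // Drain the queue and keep only the most recent buffer: every older buffer
  // is immediately returned to PipeWire so that we always process the latest
  // frame and never fall behind the producer.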
  next_buffer = pw_stream_dequeue_buffer(that->pw_stream_);
  while (next_buffer) {
    buffer = next_buffer;
    next_buffer = pw_stream_dequeue_buffer(that->pw_stream_);

    if (next_buffer) {
      pw_stream_queue_buffer(that->pw_stream_, buffer);
    }
  }

  if (!buffer) {
    return;
  }

  that->ProcessBuffer(buffer);

  pw_stream_queue_buffer(that->pw_stream_, buffer);
}

void SharedScreenCastStreamPrivate::OnRenegotiateFormat(void* data, uint64_t) {
  SharedScreenCastStreamPrivate* that =
      static_cast<SharedScreenCastStreamPrivate*>(data);
  RTC_DCHECK(that);

  {
    PipeWireThreadLoopLock thread_loop_lock(that->pw_main_loop_);

    uint8_t buffer[2048] = {};

    spa_pod_builder builder = spa_pod_builder{buffer, sizeof(buffer)};

    std::vector<const spa_pod*> params;
    struct spa_rectangle resolution =
        SPA_RECTANGLE(that->width_, that->height_);

    webrtc::MutexLock lock(&that->resolution_lock_);
    for (uint32_t format : {SPA_VIDEO_FORMAT_BGRA, SPA_VIDEO_FORMAT_RGBA,
                            SPA_VIDEO_FORMAT_BGRx, SPA_VIDEO_FORMAT_RGBx}) {
      if (!that->modifiers_.empty()) {
        params.push_back(BuildFormat(
            &builder, format, that->modifiers_,
            that->pending_resolution_change_ ? &resolution : nullptr));
      }
      params.push_back(BuildFormat(
          &builder, format, /*modifiers=*/{},
          that->pending_resolution_change_ ? &resolution : nullptr));
    }

    pw_stream_update_params(that->pw_stream_, params.data(), params.size());
    that->pending_resolution_change_ = false;
  }
}

SharedScreenCastStreamPrivate::SharedScreenCastStreamPrivate() {}

SharedScreenCastStreamPrivate::~SharedScreenCastStreamPrivate() {
  StopAndCleanupStream();
}

RTC_NO_SANITIZE("cfi-icall")
bool SharedScreenCastStreamPrivate::StartScreenCastStream(
    uint32_t stream_node_id,
    int fd,
    uint32_t width,
    uint32_t height,
    bool is_cursor_embedded) {
  width_ = width;
  height_ = height;
  is_cursor_embedded_ = is_cursor_embedded;
  if (!InitializePipeWire()) {
    RTC_LOG(LS_ERROR) << "Unable to open PipeWire library";
    return false;
  }
  egl_dmabuf_ = std::make_unique<EglDmaBuf>();

  pw_stream_node_id_ = stream_node_id;

  pw_init(/*argc=*/nullptr, /*argv=*/nullptr);

  pw_main_loop_ = pw_thread_loop_new("pipewire-main-loop", nullptr);

  pw_context_ =
      pw_context_new(pw_thread_loop_get_loop(pw_main_loop_), nullptr, 0);
  if (!pw_context_) {
    RTC_LOG(LS_ERROR) << "Failed to create PipeWire context";
    return false;
  }

  if (pw_thread_loop_start(pw_main_loop_) < 0) {
    RTC_LOG(LS_ERROR) << "Failed to start main PipeWire loop";
    return false;
  }

  pw_client_version_ = PipeWireVersion::Parse(pw_get_library_version());

  // Initialize event handlers, remote end and stream-related.
  pw_core_events_.version = PW_VERSION_CORE_EVENTS;
  pw_core_events_.info = &OnCoreInfo;
  pw_core_events_.done = &OnCoreDone;
  pw_core_events_.error = &OnCoreError;

  pw_stream_events_.version = PW_VERSION_STREAM_EVENTS;
  pw_stream_events_.state_changed = &OnStreamStateChanged;
  pw_stream_events_.param_changed = &OnStreamParamChanged;
  pw_stream_events_.process = &OnStreamProcess;

  {
    PipeWireThreadLoopLock thread_loop_lock(pw_main_loop_);

    if (fd >= 0) {
      pw_core_ = pw_context_connect_fd(
          pw_context_, fcntl(fd, F_DUPFD_CLOEXEC), nullptr, 0);
    } else {
      pw_core_ = pw_context_connect(pw_context_, nullptr, 0);
    }

    if (!pw_core_) {
      RTC_LOG(LS_ERROR) << "Failed to connect PipeWire context";
      return false;
    }

    pw_core_add_listener(pw_core_, &spa_core_listener_, &pw_core_events_,
                         this);

    // Add an event that can be later invoked by pw_loop_signal_event().
    renegotiate_ = pw_loop_add_event(pw_thread_loop_get_loop(pw_main_loop_),
                                     OnRenegotiateFormat, this);

    server_version_sync_ =
        pw_core_sync(pw_core_, PW_ID_CORE, server_version_sync_);

    pw_thread_loop_wait(pw_main_loop_);

    pw_properties* reuseProps =
        pw_properties_new_string("pipewire.client.reuse=1");
    pw_stream_ = pw_stream_new(pw_core_, "webrtc-consume-stream", reuseProps);

    if (!pw_stream_) {
      RTC_LOG(LS_ERROR) << "Failed to create PipeWire stream";
      return false;
    }

    pw_stream_add_listener(pw_stream_, &spa_stream_listener_,
                           &pw_stream_events_, this);
    uint8_t buffer[2048] = {};

    spa_pod_builder builder = spa_pod_builder{buffer, sizeof(buffer)};

    std::vector<const spa_pod*> params;
    const bool has_required_pw_client_version =
        pw_client_version_ >= kDmaBufModifierMinVersion;
    const bool has_required_pw_server_version =
        pw_server_version_ >= kDmaBufModifierMinVersion;
    struct spa_rectangle resolution;
    bool set_resolution = false;
    if (width && height) {
      resolution = SPA_RECTANGLE(width, height);
      set_resolution = true;
    }
    for (uint32_t format : {SPA_VIDEO_FORMAT_BGRA, SPA_VIDEO_FORMAT_RGBA,
                            SPA_VIDEO_FORMAT_BGRx, SPA_VIDEO_FORMAT_RGBx}) {
      // Modifiers can be used with PipeWire >= 0.3.33.
      if (has_required_pw_client_version && has_required_pw_server_version) {
        modifiers_ = egl_dmabuf_->QueryDmaBufModifiers(format);

        if (!modifiers_.empty()) {
          params.push_back(BuildFormat(&builder, format, modifiers_,
                                       set_resolution ? &resolution : nullptr));
        }
      }

      params.push_back(BuildFormat(&builder, format, /*modifiers=*/{},
                                   set_resolution ? &resolution : nullptr));
    }

    if (pw_stream_connect(pw_stream_, PW_DIRECTION_INPUT, pw_stream_node_id_,
                          PW_STREAM_FLAG_AUTOCONNECT, params.data(),
                          params.size()) != 0) {
      RTC_LOG(LS_ERROR) << "Could not connect receiving stream.";
      return false;
    }

    RTC_LOG(LS_INFO) << "PipeWire remote opened.";
  }
  return true;
}

RTC_NO_SANITIZE("cfi-icall")
void SharedScreenCastStreamPrivate::UpdateScreenCastStreamResolution(
    uint32_t width,
    uint32_t height) {
  if (!width || !height) {
    RTC_LOG(LS_WARNING) << "Bad resolution specified: " << width << "x"
                        << height;
    return;
  }
  if (!pw_main_loop_) {
    RTC_LOG(LS_WARNING) << "No main pipewire loop, ignoring resolution change";
    return;
  }
  if (!renegotiate_) {
    RTC_LOG(LS_WARNING) << "Can not renegotiate stream params, ignoring "
                        << "resolution change";
    return;
  }
  if (width_ != width || height_ != height) {
    width_ = width;
    height_ = height;
    {
      webrtc::MutexLock lock(&resolution_lock_);
      pending_resolution_change_ = true;
    }
    pw_loop_signal_event(pw_thread_loop_get_loop(pw_main_loop_), renegotiate_);
  }
}

void SharedScreenCastStreamPrivate::StopScreenCastStream() {
  StopAndCleanupStream();
}

void SharedScreenCastStreamPrivate::StopAndCleanupStream() {
  // We get buffers on the PipeWire thread, but this is called from the
  // capturer thread, so we need to wait on and stop the PipeWire thread
  // before we disconnect the stream, so that we can guarantee we aren't in
  // the middle of processing a new frame.

  // Even if we *do* somehow have the other objects without a PipeWire thread,
  // destroying them without a thread causes a crash.
  if (!pw_main_loop_)
    return;

  // While we can stop the thread now, we cannot destroy it until we've cleaned
  // up the other members.
  pw_thread_loop_wait(pw_main_loop_);
  pw_thread_loop_stop(pw_main_loop_);

  if (pw_stream_) {
    pw_stream_disconnect(pw_stream_);
    pw_stream_destroy(pw_stream_);
    pw_stream_ = nullptr;

    {
      webrtc::MutexLock lock(&queue_lock_);
      queue_.Reset();
    }
  }

  if (pw_core_) {
    pw_core_disconnect(pw_core_);
    pw_core_ = nullptr;
  }

  if (pw_context_) {
    pw_context_destroy(pw_context_);
    pw_context_ = nullptr;
  }

  pw_thread_loop_destroy(pw_main_loop_);
  pw_main_loop_ = nullptr;
}

std::unique_ptr<SharedDesktopFrame>
SharedScreenCastStreamPrivate::CaptureFrame() {
  webrtc::MutexLock lock(&queue_lock_);

  if (!pw_stream_ || !queue_.current_frame()) {
    return std::unique_ptr<SharedDesktopFrame>{};
  }

  std::unique_ptr<SharedDesktopFrame> frame = queue_.current_frame()->Share();
  if (use_damage_region_) {
    frame->mutable_updated_region()->Swap(&damage_region_);
    damage_region_.Clear();
  }

  return frame;
}

std::unique_ptr<MouseCursor> SharedScreenCastStreamPrivate::CaptureCursor() {
  if (!mouse_cursor_) {
    return nullptr;
  }

  return std::move(mouse_cursor_);
}

DesktopVector SharedScreenCastStreamPrivate::CaptureCursorPosition() {
  return mouse_cursor_position_;
}

RTC_NO_SANITIZE("cfi-icall")
void SharedScreenCastStreamPrivate::ProcessBuffer(pw_buffer* buffer) {
  spa_buffer* spa_buffer = buffer->buffer;
  ScopedBuf map;
  std::unique_ptr<uint8_t[]> src_unique_ptr;
  uint8_t* src = nullptr;

  // Try to update the mouse cursor first, because it can be the only
  // information carried by the buffer.
  {
    const struct spa_meta_cursor* cursor =
        static_cast<struct spa_meta_cursor*>(spa_buffer_find_meta_data(
            spa_buffer, SPA_META_Cursor, sizeof(*cursor)));
    if (cursor && spa_meta_cursor_is_valid(cursor)) {
      struct spa_meta_bitmap* bitmap = nullptr;

      if (cursor->bitmap_offset)
        bitmap =
            SPA_MEMBER(cursor, cursor->bitmap_offset, struct spa_meta_bitmap);

      if (bitmap && bitmap->size.width > 0 && bitmap->size.height > 0) {
        const uint8_t* bitmap_data =
            SPA_MEMBER(bitmap, bitmap->offset, uint8_t);
        BasicDesktopFrame* mouse_frame = new BasicDesktopFrame(
            DesktopSize(bitmap->size.width, bitmap->size.height));
        mouse_frame->CopyPixelsFrom(
            bitmap_data, bitmap->stride,
            DesktopRect::MakeWH(bitmap->size.width, bitmap->size.height));
        mouse_cursor_ = std::make_unique<MouseCursor>(
            mouse_frame, DesktopVector(cursor->hotspot.x, cursor->hotspot.y));

        if (observer_) {
          observer_->OnCursorShapeChanged();
        }
      }
      mouse_cursor_position_.set(cursor->position.x, cursor->position.y);

      if (observer_) {
        observer_->OnCursorPositionChanged();
      }
    }
  }

  if (spa_buffer->datas[0].chunk->size == 0) {
    return;
  }

  if (spa_buffer->datas[0].type == SPA_DATA_MemFd) {
    map.initialize(
        static_cast<uint8_t*>(
            mmap(nullptr,
                 spa_buffer->datas[0].maxsize + spa_buffer->datas[0].mapoffset,
                 PROT_READ, MAP_PRIVATE, spa_buffer->datas[0].fd, 0)),
        spa_buffer->datas[0].maxsize + spa_buffer->datas[0].mapoffset,
        spa_buffer->datas[0].fd);

    if (!map) {
      RTC_LOG(LS_ERROR) << "Failed to mmap the memory: "
                        << std::strerror(errno);
      return;
    }

    src = SPA_MEMBER(map.get(), spa_buffer->datas[0].mapoffset, uint8_t);
  } else if (spa_buffer->datas[0].type == SPA_DATA_DmaBuf) {
    const uint n_planes = spa_buffer->n_datas;

    if (!n_planes) {
      return;
    }

    std::vector<EglDmaBuf::PlaneData> plane_datas;
    for (uint32_t i = 0; i < n_planes; ++i) {
      EglDmaBuf::PlaneData data = {
          static_cast<int32_t>(spa_buffer->datas[i].fd),
          static_cast<uint32_t>(spa_buffer->datas[i].chunk->stride),
          static_cast<uint32_t>(spa_buffer->datas[i].chunk->offset)};
      plane_datas.push_back(data);
    }

    // When importing DMA-BUFs, we use the stride (number of bytes from one
    // row of pixels in the buffer) provided by PipeWire. The stride from
    // PipeWire is given by the graphics driver, and some drivers might add
    // padding for memory layout optimizations, so the stride is not always
    // equal to BYTES_PER_PIXEL x WIDTH. This is fine, because during the
    // import we will use OpenGL and the same graphics driver, so it will be
    // able to work with the stride it provided. Later on, when we work with
    // images we get from DMA-BUFs, we will need to update the stride to be
    // equal to BYTES_PER_PIXEL x WIDTH, as that's the size of the
    // DesktopFrame we allocate for each captured frame.
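    // Illustrative numbers (driver dependent, not guaranteed): a 1920-pixel
    // wide BGRx stream is 1920 * 4 = 7680 bytes per row when tightly packed,
    // but a driver may report e.g. 8192 bytes per row because of alignment
    // padding; the extra 512 bytes per row are dropped once we repack into
    // the DesktopFrame below.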
    src_unique_ptr = egl_dmabuf_->ImageFromDmaBuf(
        stream_size_, spa_video_format_.format, plane_datas, modifier_);
    if (src_unique_ptr) {
      src = src_unique_ptr.get();
    } else {
      RTC_LOG(LS_ERROR) << "Dropping DMA-BUF modifier: " << modifier_
                        << " and trying to renegotiate stream parameters";

      if (pw_server_version_ >= kDropSingleModifierMinVersion) {
        modifiers_.erase(
            std::remove(modifiers_.begin(), modifiers_.end(), modifier_),
            modifiers_.end());
      } else {
        modifiers_.clear();
      }

      pw_loop_signal_event(pw_thread_loop_get_loop(pw_main_loop_),
                           renegotiate_);
      return;
    }
  } else if (spa_buffer->datas[0].type == SPA_DATA_MemPtr) {
    src = static_cast<uint8_t*>(spa_buffer->datas[0].data);
  }

  if (!src) {
    if (observer_) {
      observer_->OnFailedToProcessBuffer();
    }
    return;
  }

  // Use SPA_META_VideoCrop metadata to get the frame size. KDE and GNOME
  // handle screen/window sharing differently. KDE/KWin doesn't use
  // SPA_META_VideoCrop metadata and, when sharing a window, it always sets
  // the stream size to the size of the window; with that we just allocate the
  // DesktopFrame using the size of the stream itself. GNOME/Mutter always
  // sets the stream size to the size of the whole screen, even when sharing
  // a window. To get the real window size we have to use SPA_META_VideoCrop
  // metadata, which gives us the size we need in order to allocate the
  // DesktopFrame.
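  //
  // Hypothetical example of the GNOME/Mutter case: sharing an 800x600 window
  // on a 1920x1080 screen yields a 1920x1080 stream whose VideoCrop region is
  // 800x600, so the DesktopFrame below is allocated as 800x600.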

  struct spa_meta_region* videocrop_metadata =
      static_cast<struct spa_meta_region*>(spa_buffer_find_meta_data(
          spa_buffer, SPA_META_VideoCrop, sizeof(*videocrop_metadata)));

  // If the video size from the metadata is bigger than the actual video
  // stream size, either the metadata is wrong or we would have to up-scale
  // the video; in both cases just quit now.
  if (videocrop_metadata &&
      (videocrop_metadata->region.size.width >
           static_cast<uint32_t>(stream_size_.width()) ||
       videocrop_metadata->region.size.height >
           static_cast<uint32_t>(stream_size_.height()))) {
    RTC_LOG(LS_ERROR) << "Stream metadata sizes are wrong!";

    if (observer_) {
      observer_->OnFailedToProcessBuffer();
    }

    return;
  }

  // Use SPA_META_VideoCrop metadata to get the DesktopFrame size in case
  // a window is shared and it represents just a small portion of the
  // stream itself. This is for example used in the case of GNOME (Mutter),
  // where the stream has the size of the screen itself, but we care only
  // about the smaller portion representing the window inside.
  bool videocrop_metadata_use = false;
  const struct spa_rectangle* videocrop_metadata_size =
      videocrop_metadata ? &videocrop_metadata->region.size : nullptr;

  if (videocrop_metadata_size && videocrop_metadata_size->width != 0 &&
      videocrop_metadata_size->height != 0 &&
      (static_cast<int>(videocrop_metadata_size->width) <
           stream_size_.width() ||
       static_cast<int>(videocrop_metadata_size->height) <
           stream_size_.height())) {
    videocrop_metadata_use = true;
  }

  if (videocrop_metadata_use) {
    frame_size_ = DesktopSize(videocrop_metadata_size->width,
                              videocrop_metadata_size->height);
  } else {
    frame_size_ = stream_size_;
  }

  // Get the position of the video crop within the stream. Just double-check
  // that the position doesn't exceed the size of the stream itself. NOTE:
  // Currently it looks like no implementation is using this.
  uint32_t y_offset =
      videocrop_metadata_use &&
              (videocrop_metadata->region.position.y + frame_size_.height() <=
               stream_size_.height())
          ? videocrop_metadata->region.position.y
          : 0;
  uint32_t x_offset =
      videocrop_metadata_use &&
              (videocrop_metadata->region.position.x + frame_size_.width() <=
               stream_size_.width())
          ? videocrop_metadata->region.position.x
          : 0;

  const uint32_t stream_stride = kBytesPerPixel * stream_size_.width();
  uint32_t buffer_stride = spa_buffer->datas[0].chunk->stride;
  uint32_t src_stride = buffer_stride;

  if (spa_buffer->datas[0].type == SPA_DATA_DmaBuf &&
      buffer_stride > stream_stride) {
    // When DMA-BUFs are used, the stride reported in spa_buffer might contain
    // additional padding, but after we import the buffer, that stride is no
    // longer relevant and we should just calculate it based on the stream
    // width. For more context see https://crbug.com/1333304.
    src_stride = stream_stride;
  }

  uint8_t* updated_src =
      src + (src_stride * y_offset) + (kBytesPerPixel * x_offset);

  webrtc::MutexLock lock(&queue_lock_);

  queue_.MoveToNextFrame();
  if (queue_.current_frame() && queue_.current_frame()->IsShared()) {
    RTC_DLOG(LS_WARNING) << "Overwriting frame that is still shared";

    if (observer_) {
      observer_->OnFailedToProcessBuffer();
    }
  }

  if (!queue_.current_frame() ||
      !queue_.current_frame()->size().equals(frame_size_)) {
    std::unique_ptr<DesktopFrame> frame(new BasicDesktopFrame(
        DesktopSize(frame_size_.width(), frame_size_.height())));
    queue_.ReplaceCurrentFrame(SharedDesktopFrame::Wrap(std::move(frame)));
  }

  queue_.current_frame()->CopyPixelsFrom(
      updated_src, (src_stride - (kBytesPerPixel * x_offset)),
      DesktopRect::MakeWH(frame_size_.width(), frame_size_.height()));

  if (spa_video_format_.format == SPA_VIDEO_FORMAT_RGBx ||
      spa_video_format_.format == SPA_VIDEO_FORMAT_RGBA) {
    uint8_t* tmp_src = queue_.current_frame()->data();
    for (int i = 0; i < frame_size_.height(); ++i) {
      // If both sides decided to go with the RGBx format, we need to convert
      // it to BGRx to match the color format expected by WebRTC.
      ConvertRGBxToBGRx(tmp_src, queue_.current_frame()->stride());
      tmp_src += queue_.current_frame()->stride();
    }
  }

  if (observer_) {
    observer_->OnDesktopFrameChanged();
  }

  if (use_damage_region_) {
    const struct spa_meta* video_damage = static_cast<struct spa_meta*>(
        spa_buffer_find_meta(spa_buffer, SPA_META_VideoDamage));
    if (video_damage) {
      spa_meta_region* meta_region;

      queue_.current_frame()->mutable_updated_region()->Clear();

      spa_meta_for_each(meta_region, video_damage) {
        // Skip empty regions.
        if (meta_region->region.size.width == 0 ||
            meta_region->region.size.height == 0) {
          continue;
        }

        damage_region_.AddRect(DesktopRect::MakeXYWH(
            meta_region->region.position.x, meta_region->region.position.y,
            meta_region->region.size.width, meta_region->region.size.height));
      }
    } else {
      damage_region_.SetRect(
          DesktopRect::MakeSize(queue_.current_frame()->size()));
    }
  } else {
    queue_.current_frame()->mutable_updated_region()->SetRect(
        DesktopRect::MakeSize(queue_.current_frame()->size()));
  }
  queue_.current_frame()->set_may_contain_cursor(is_cursor_embedded_);
}

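// In-place R<->B swap for a run of 4-byte pixels; `size` is the run length in
// bytes, so size / 4 pixels are converted and the G and X/A bytes are left
// untouched.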
void SharedScreenCastStreamPrivate::ConvertRGBxToBGRx(uint8_t* frame,
                                                      uint32_t size) {
  for (uint32_t i = 0; i < size; i += 4) {
    uint8_t tempR = frame[i];
    uint8_t tempB = frame[i + 2];
    frame[i] = tempB;
    frame[i + 2] = tempR;
  }
}

SharedScreenCastStream::SharedScreenCastStream()
    : private_(std::make_unique<SharedScreenCastStreamPrivate>()) {}

SharedScreenCastStream::~SharedScreenCastStream() {}

rtc::scoped_refptr<SharedScreenCastStream>
SharedScreenCastStream::CreateDefault() {
  // Explicit new, to access non-public constructor.
  return rtc::scoped_refptr<SharedScreenCastStream>(
      new SharedScreenCastStream());
}

bool SharedScreenCastStream::StartScreenCastStream(uint32_t stream_node_id) {
  return private_->StartScreenCastStream(stream_node_id, -1);
}

bool SharedScreenCastStream::StartScreenCastStream(uint32_t stream_node_id,
                                                   int fd,
                                                   uint32_t width,
                                                   uint32_t height,
                                                   bool is_cursor_embedded) {
  return private_->StartScreenCastStream(stream_node_id, fd, width, height,
                                         is_cursor_embedded);
}

void SharedScreenCastStream::UpdateScreenCastStreamResolution(
    uint32_t width,
    uint32_t height) {
  private_->UpdateScreenCastStreamResolution(width, height);
}

void SharedScreenCastStream::SetUseDamageRegion(bool use_damage_region) {
  private_->SetUseDamageRegion(use_damage_region);
}

void SharedScreenCastStream::SetObserver(
    SharedScreenCastStream::Observer* observer) {
  private_->SetObserver(observer);
}

void SharedScreenCastStream::StopScreenCastStream() {
  private_->StopScreenCastStream();
}

std::unique_ptr<SharedDesktopFrame> SharedScreenCastStream::CaptureFrame() {
  return private_->CaptureFrame();
}

std::unique_ptr<MouseCursor> SharedScreenCastStream::CaptureCursor() {
  return private_->CaptureCursor();
}

absl::optional<DesktopVector> SharedScreenCastStream::CaptureCursorPosition() {
  DesktopVector position = private_->CaptureCursorPosition();

  // Consider only (x >= 0 and y >= 0) a valid position.
  if (position.x() < 0 || position.y() < 0) {
    return absl::nullopt;
  }

  return position;
}

}  // namespace webrtc
961