/*
 * Copyright 2021 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>

#include "drv_helpers.h"
#include "drv_priv.h"
#include "external/virtgpu_cross_domain_protocol.h"
#include "external/virtgpu_drm.h"
#include "util.h"
#include "virtgpu.h"

#define CAPSET_CROSS_FAKE 30

static const uint32_t scanout_render_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
						    DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
						    DRM_FORMAT_XRGB8888 };

static const uint32_t texture_only_formats[] = {
	DRM_FORMAT_R8,		DRM_FORMAT_NV12,	   DRM_FORMAT_P010,
	DRM_FORMAT_YVU420,	DRM_FORMAT_YVU420_ANDROID, DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_ARGB2101010, DRM_FORMAT_XBGR2101010,	   DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_ABGR16161616F
};

extern struct virtgpu_param params[];

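// Per-driver state for the cross-domain context: the GEM handle and mapping of the
// one-page ring shared with the host for metadata queries, plus a cache of previous
// query results guarded by a mutex.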
struct cross_domain_private {
	uint32_t ring_handle;
	void *ring_addr;
	struct drv_array *metadata_cache;
	pthread_mutex_t metadata_cache_lock;
	bool mt8183_camera_quirk_;
};

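// Tear down the cross-domain state: unmap and close the shared ring BO and free the
// metadata cache.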
static void cross_domain_release_private(struct driver *drv)
{
	int ret;
	struct cross_domain_private *priv = drv->priv;
	struct drm_gem_close gem_close = { 0 };

	if (priv->ring_addr != MAP_FAILED)
		munmap(priv->ring_addr, PAGE_SIZE);

	if (priv->ring_handle) {
		gem_close.handle = priv->ring_handle;

		ret = drmIoctl(drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
		if (ret) {
			drv_loge("DRM_IOCTL_GEM_CLOSE failed (handle=%x) error %d\n",
				 priv->ring_handle, ret);
		}
	}

	if (priv->metadata_cache)
		drv_array_destroy(priv->metadata_cache);

	pthread_mutex_destroy(&priv->metadata_cache_lock);

	free(priv);
}

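// Advertise the format/use-flag combinations supported by this backend, all with linear
// metadata.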
static void add_combinations(struct driver *drv)
{
	struct format_metadata metadata;

	// Linear metadata always supported.
	metadata.tiling = 0;
	metadata.priority = 1;
	metadata.modifier = DRM_FORMAT_MOD_LINEAR;

	drv_add_combinations(drv, scanout_render_formats, ARRAY_SIZE(scanout_render_formats),
			     &metadata, BO_USE_RENDER_MASK | BO_USE_SCANOUT);

	drv_add_combinations(drv, texture_only_formats, ARRAY_SIZE(texture_only_formats),
			     &metadata, BO_USE_TEXTURE_MASK);

	/* Android CTS tests require this. */
	drv_add_combination(drv, DRM_FORMAT_BGR888, &metadata, BO_USE_SW_MASK);

	drv_modify_combination(drv, DRM_FORMAT_YVU420, &metadata, BO_USE_HW_VIDEO_ENCODER);
	drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_SCANOUT | BO_USE_HW_VIDEO_ENCODER);

	/*
	 * The R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB: JPEG snapshots from the
	 * camera, input/output of hardware decoders/encoders and sensors, and AHBs used as
	 * SSBOs/UBOs.
	 */
	drv_modify_combination(drv, DRM_FORMAT_R8, &metadata,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER | BO_USE_SENSOR_DIRECT_DATA |
				   BO_USE_GPU_DATA_BUFFER);

	drv_modify_linear_combinations(drv);
}

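// Submit a cross-domain command to the host. When `wait` is set, the ring BO is attached
// to the submission so that the following DRM_IOCTL_VIRTGPU_WAIT does not return until the
// host has processed the command and written its response into the ring.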
static int cross_domain_submit_cmd(struct driver *drv, uint32_t *cmd, uint32_t cmd_size, bool wait)
{
	int ret;
	struct drm_virtgpu_3d_wait wait_3d = { 0 };
	struct drm_virtgpu_execbuffer exec = { 0 };
	struct cross_domain_private *priv = drv->priv;

	exec.flags = VIRTGPU_EXECBUF_RING_IDX;
	exec.command = (uint64_t)&cmd[0];
	exec.size = cmd_size;
	if (wait) {
		exec.bo_handles = (uint64_t)&priv->ring_handle;
		exec.num_bo_handles = 1;
	}

	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &exec);
	if (ret < 0) {
		drv_loge("DRM_IOCTL_VIRTGPU_EXECBUFFER failed with %s\n", strerror(errno));
		return -EINVAL;
	}

	ret = -EAGAIN;
	while (ret == -EAGAIN) {
		wait_3d.handle = priv->ring_handle;
		ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &wait_3d);
	}

	if (ret < 0) {
		drv_loge("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
		return ret;
	}

	return 0;
}

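// Two allocation requests are considered identical for caching purposes if width, height,
// format and use flags all match.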
static bool metadata_equal(struct bo_metadata *current, struct bo_metadata *cached)
{
	if ((current->width == cached->width) && (current->height == cached->height) &&
	    (current->format == cached->format) && (current->use_flags == cached->use_flags))
		return true;
	return false;
}

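// Query the host for the image layout (strides, offsets, modifier, total size, blob id) of
// an allocation request, consulting the local metadata cache first.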
static int cross_domain_metadata_query(struct driver *drv, struct bo_metadata *metadata)
{
	int ret = 0;
	struct bo_metadata *cached_data = NULL;
	struct cross_domain_private *priv = drv->priv;
	struct CrossDomainGetImageRequirements cmd_get_reqs;
	uint32_t *addr = (uint32_t *)priv->ring_addr;
	uint32_t plane;

	memset(&cmd_get_reqs, 0, sizeof(cmd_get_reqs));
	pthread_mutex_lock(&priv->metadata_cache_lock);
	for (uint32_t i = 0; i < drv_array_size(priv->metadata_cache); i++) {
		cached_data = (struct bo_metadata *)drv_array_at_idx(priv->metadata_cache, i);
		if (!metadata_equal(metadata, cached_data))
			continue;

		memcpy(metadata, cached_data, sizeof(*cached_data));
		goto out_unlock;
	}

	cmd_get_reqs.hdr.cmd = CROSS_DOMAIN_CMD_GET_IMAGE_REQUIREMENTS;
	cmd_get_reqs.hdr.cmd_size = sizeof(struct CrossDomainGetImageRequirements);

	cmd_get_reqs.width = metadata->width;
	cmd_get_reqs.height = metadata->height;
	cmd_get_reqs.drm_format = metadata->format;
	cmd_get_reqs.flags = metadata->use_flags;

	// HACK(b/360937659): see also b/172389166 for history.
	// Host minigbm has a hack that recognizes DRM_FORMAT_YVU420 + BO_USE_LINEAR and
	// internally maps the format back to DRM_FORMAT_YVU420_ANDROID to apply the appropriate
	// layout rules.
	if (cmd_get_reqs.drm_format == DRM_FORMAT_YVU420_ANDROID) {
		cmd_get_reqs.drm_format = DRM_FORMAT_YVU420;
		cmd_get_reqs.flags |= BO_USE_LINEAR;
	}

	/*
	 * It is possible to avoid blocking other bo_create() calls by unlocking before
	 * cross_domain_submit_cmd() and re-locking afterwards. However, that would require
	 * another scan of the metadata cache before drv_array_append() in case two bo_create()
	 * calls do the same metadata query. Until cross-domain functionality is more widely
	 * tested, leave this optimization out for now.
	 */
	ret = cross_domain_submit_cmd(drv, (uint32_t *)&cmd_get_reqs, cmd_get_reqs.hdr.cmd_size,
				      true);
	if (ret < 0)
		goto out_unlock;

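	// The response is read back from the shared ring: four 32-bit strides, four 32-bit
	// offsets, a 64-bit format modifier, a 64-bit total size, then the blob id, map info,
	// memory index and physical device index.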
	memcpy(&metadata->strides, &addr[0], 4 * sizeof(uint32_t));
	memcpy(&metadata->offsets, &addr[4], 4 * sizeof(uint32_t));
	memcpy(&metadata->format_modifier, &addr[8], sizeof(uint64_t));
	memcpy(&metadata->total_size, &addr[10], sizeof(uint64_t));
	memcpy(&metadata->blob_id, &addr[12], sizeof(uint32_t));

	metadata->map_info = addr[13];
	metadata->memory_idx = addr[14];
	metadata->physical_device_idx = addr[15];

	for (plane = 1; plane < metadata->num_planes; plane++) {
		metadata->sizes[plane - 1] =
		    metadata->offsets[plane] - metadata->offsets[plane - 1];
	}
	metadata->sizes[plane - 1] = metadata->total_size - metadata->offsets[plane - 1];

	drv_array_append(priv->metadata_cache, metadata);

out_unlock:
	pthread_mutex_unlock(&priv->metadata_cache_lock);
	return ret;
}

/* Fill out metadata for guest buffers, used only for CPU access: */
void cross_domain_get_emulated_metadata(struct bo_metadata *metadata)
{
	uint32_t offset = 0;

	for (size_t i = 0; i < metadata->num_planes; i++) {
		metadata->strides[i] = drv_stride_from_format(metadata->format, metadata->width, i);
		metadata->sizes[i] = drv_size_from_format(metadata->format, metadata->strides[i],
							  metadata->height, i);
		metadata->offsets[i] = offset;
		offset += metadata->sizes[i];
	}

	metadata->total_size = offset;
}

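// Initialize the backend: verify the required virtgpu features, create a cross-domain
// context with a single fence ring, allocate and map the shared query ring, and tell the
// host about it.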
static int cross_domain_init(struct driver *drv)
{
	int ret;
	struct cross_domain_private *priv;
	struct drm_virtgpu_map map = { 0 };
	struct drm_virtgpu_get_caps args = { 0 };
	struct drm_virtgpu_context_init init = { 0 };
	struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };
	struct drm_virtgpu_context_set_param ctx_set_params[2] = { { 0 } };

	struct CrossDomainInit cmd_init;
	struct CrossDomainCapabilities cross_domain_caps;

	memset(&cmd_init, 0, sizeof(cmd_init));
	if (!params[param_context_init].value)
		return -ENOTSUP;

	if ((params[param_supported_capset_ids].value & (1 << VIRTIO_GPU_CAPSET_CROSS_DOMAIN)) == 0)
		return -ENOTSUP;

	if (!params[param_resource_blob].value)
		return -ENOTSUP;

	/// Need zero copy memory
	if (!params[param_host_visible].value && !params[param_create_guest_handle].value)
		return -ENOTSUP;

	priv = calloc(1, sizeof(*priv));
	if (!priv)
		return -ENOMEM;

	ret = pthread_mutex_init(&priv->metadata_cache_lock, NULL);
	if (ret) {
		free(priv);
		return ret;
	}

	priv->metadata_cache = drv_array_init(sizeof(struct bo_metadata));
	if (!priv->metadata_cache) {
		ret = -ENOMEM;
		goto free_private;
	}

	priv->ring_addr = MAP_FAILED;
	drv->priv = priv;

	args.cap_set_id = VIRTIO_GPU_CAPSET_CROSS_DOMAIN;
	args.size = sizeof(struct CrossDomainCapabilities);
	args.addr = (unsigned long long)&cross_domain_caps;

	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
	if (ret) {
		drv_loge("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));
		goto free_private;
	}

	// When 3D features are available but the host does not support external memory, fall
	// back to the virgl minigbm backend. This typically means the guest-side minigbm
	// resource will be backed by a host OpenGL texture.
	if (!cross_domain_caps.supports_external_gpu_memory && params[param_3d].value) {
		ret = -ENOTSUP;
		goto free_private;
	}

	// Initialize the cross-domain context. Create one fence context to wait for metadata
	// queries.
	ctx_set_params[0].param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID;
	ctx_set_params[0].value = VIRTIO_GPU_CAPSET_CROSS_DOMAIN;
	ctx_set_params[1].param = VIRTGPU_CONTEXT_PARAM_NUM_RINGS;
	ctx_set_params[1].value = 1;

	init.ctx_set_params = (unsigned long long)&ctx_set_params[0];
	init.num_params = 2;
	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);
	if (ret) {
		drv_loge("DRM_IOCTL_VIRTGPU_CONTEXT_INIT failed with %s\n", strerror(errno));
		goto free_private;
	}

	// Create a shared ring buffer to read metadata queries.
	drm_rc_blob.size = PAGE_SIZE;
	drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
	drm_rc_blob.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE;

	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
	if (ret < 0) {
		drv_loge("DRM_VIRTGPU_RESOURCE_CREATE_BLOB failed with %s\n", strerror(errno));
		goto free_private;
	}

	priv->ring_handle = drm_rc_blob.bo_handle;

	// Map the shared ring buffer.
	map.handle = priv->ring_handle;
	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_MAP, &map);
	if (ret < 0) {
		drv_loge("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
		goto free_private;
	}

	priv->ring_addr =
	    mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, drv->fd, map.offset);

	if (priv->ring_addr == MAP_FAILED) {
		ret = -errno;
		drv_loge("mmap failed with %s\n", strerror(errno));
		goto free_private;
	}

	// Notify the host about the ring buffer.
	cmd_init.hdr.cmd = CROSS_DOMAIN_CMD_INIT;
	cmd_init.hdr.cmd_size = sizeof(struct CrossDomainInit);
	cmd_init.ring_id = drm_rc_blob.res_handle;
	ret = cross_domain_submit_cmd(drv, (uint32_t *)&cmd_init, cmd_init.hdr.cmd_size, false);
	if (ret < 0)
		goto free_private;

	const char *name;
	name = drv_get_os_option("ro.product.name");
	priv->mt8183_camera_quirk_ = name && !strcmp(name, "kukui");

	// minigbm bookkeeping.
	add_combinations(drv);
	return 0;

free_private:
	cross_domain_release_private(drv);
	return ret;
}

static void cross_domain_close(struct driver *drv)
{
	cross_domain_release_private(drv);
}

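// Allocate a buffer as a virtgpu blob resource. CPU-only buffers get an emulated linear
// layout backed by guest memory; buffers with hardware usage query the host for their
// layout and are created from the returned blob id.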
static int cross_domain_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
				  uint64_t use_flags)
{
	int ret;
	uint32_t blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
	struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };

	if (use_flags & (BO_USE_SW_MASK | BO_USE_GPU_DATA_BUFFER))
		blob_flags |= VIRTGPU_BLOB_FLAG_USE_MAPPABLE;

	if (!(use_flags & BO_USE_HW_MASK)) {
		cross_domain_get_emulated_metadata(&bo->meta);
		drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
	} else {
		ret = cross_domain_metadata_query(bo->drv, &bo->meta);
		if (ret < 0) {
			drv_loge("Metadata query failed\n");
			return ret;
		}

		if (params[param_cross_device].value)
			blob_flags |= VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE;

		/// It may be possible to have host3d blobs and handles from guest memory at the
		/// same time. But for the immediate use cases, we will either have one or the
		/// other. For now, just prefer guest memory, since adding that feature is more
		/// involved (it requires the --udmabuf flag to crosvm), so developers would
		/// likely want to test it.
		if (params[param_create_guest_handle].value) {
			drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
			blob_flags |= VIRTGPU_BLOB_FLAG_CREATE_GUEST_HANDLE;
		} else if (params[param_host_visible].value) {
			drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_HOST3D;
		}
		drm_rc_blob.blob_id = (uint64_t)bo->meta.blob_id;
	}

	drm_rc_blob.size = bo->meta.total_size;
	drm_rc_blob.blob_flags = blob_flags;

	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
	if (ret < 0) {
		drv_loge("DRM_VIRTGPU_RESOURCE_CREATE_BLOB failed with %s\n", strerror(errno));
		return -errno;
	}

	bo->handle.u32 = drm_rc_blob.bo_handle;

	return 0;
}

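// Map a blob into the guest address space: look up the fake mmap offset for the GEM
// handle, then mmap it through the DRM fd.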
static void *cross_domain_bo_map(struct bo *bo, struct vma *vma, uint32_t map_flags)
{
	int ret;
	struct drm_virtgpu_map gem_map = { 0 };

	gem_map.handle = bo->handle.u32;
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_MAP, &gem_map);
	if (ret) {
		drv_loge("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
		return MAP_FAILED;
	}

	vma->length = bo->meta.total_size;
	return mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
		    gem_map.offset);
}

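// Resolve Android's flexible and implementation-defined formats to concrete DRM formats
// based on the requested usage.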
static void cross_domain_resolve_format_and_use_flags(struct driver *drv, uint32_t format,
						      uint64_t use_flags, uint32_t *out_format,
						      uint64_t *out_use_flags)
{
	struct cross_domain_private *priv = drv->priv;
	*out_format = format;
	*out_use_flags = use_flags;

	switch (format) {
	case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
		if (priv->mt8183_camera_quirk_ && (use_flags & BO_USE_CAMERA_READ) &&
		    !(use_flags & BO_USE_SCANOUT)) {
			*out_format = DRM_FORMAT_MTISP_SXYZW10;
			break;
		}
		/* Common camera implementation defined format. */
		if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE)) {
			*out_format = DRM_FORMAT_NV12;
		} else {
			/* HACK: See b/28671744 */
			*out_format = DRM_FORMAT_XBGR8888;
			*out_use_flags &= ~BO_USE_HW_VIDEO_ENCODER;
		}
		break;
	case DRM_FORMAT_FLEX_YCbCr_420_888:
		/* Common flexible video format. */
		*out_format = DRM_FORMAT_NV12;
		break;
	case DRM_FORMAT_YVU420_ANDROID:
		*out_use_flags &= ~BO_USE_SCANOUT;
		break;
	default:
		break;
	}
}

const struct backend virtgpu_cross_domain = {
	.name = "virtgpu_cross_domain",
	.init = cross_domain_init,
	.close = cross_domain_close,
	.bo_create = cross_domain_bo_create,
	.bo_import = drv_prime_bo_import,
	.bo_destroy = drv_gem_bo_destroy,
	.bo_map = cross_domain_bo_map,
	.bo_unmap = drv_bo_munmap,
	.resolve_format_and_use_flags = cross_domain_resolve_format_and_use_flags,
};