// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_dma_buf.h"

#include <kunit/test.h>
#include <linux/dma-buf.h>
#include <linux/pci-p2pdma.h>

#include <drm/drm_device.h>
#include <drm/drm_prime.h>
#include <drm/ttm/ttm_tt.h>

#include "tests/xe_test.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_pm.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_vm.h"

MODULE_IMPORT_NS("DMA_BUF");

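/*
 * Attach callback: disable peer2peer if the PCI topology does not allow P2P
 * DMA between exporter and importer, and reject the attachment if the BO can
 * neither be reached via P2P nor be migrated to system (TT) memory. Also
 * takes a runtime PM reference that is dropped again on detach.
 */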
static int xe_dma_buf_attach(struct dma_buf *dmabuf,
			     struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;

	if (attach->peer2peer &&
	    pci_p2pdma_distance(to_pci_dev(obj->dev->dev), attach->dev, false) < 0)
		attach->peer2peer = false;

	if (!attach->peer2peer && !xe_bo_can_migrate(gem_to_xe_bo(obj), XE_PL_TT))
		return -EOPNOTSUPP;

	xe_pm_runtime_get(to_xe_device(obj->dev));
	return 0;
}

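/* Detach callback: drop the runtime PM reference taken in xe_dma_buf_attach(). */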
static void xe_dma_buf_detach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;

	xe_pm_runtime_put(to_xe_device(obj->dev));
}

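/*
 * Pin callback: used by importers that can't handle dynamic (movable)
 * buffers. The BO is migrated to TT (system) memory and pinned there, so
 * that all importers can access it.
 */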
static int xe_dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);
	struct xe_device *xe = xe_bo_device(bo);
	int ret;

	/*
	 * For now only support pinning in TT memory, for two reasons:
	 * 1) Avoid pinning in a placement not accessible to some importers.
	 * 2) Pinning in VRAM requires PIN accounting which is a to-do.
	 */
	if (xe_bo_is_pinned(bo) && !xe_bo_is_mem_type(bo, XE_PL_TT)) {
		drm_dbg(&xe->drm, "Can't migrate pinned bo for dma-buf pin.\n");
		return -EINVAL;
	}

	ret = xe_bo_migrate(bo, XE_PL_TT);
	if (ret) {
		if (ret != -EINTR && ret != -ERESTARTSYS)
			drm_dbg(&xe->drm,
				"Failed migrating dma-buf to TT memory: %pe\n",
				ERR_PTR(ret));
		return ret;
	}

	ret = xe_bo_pin_external(bo);
	xe_assert(xe, !ret);

	return 0;
}

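/* Unpin callback: undo the external pin taken in xe_dma_buf_pin(). */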
static void xe_dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);

	xe_bo_unpin_external(bo);
}

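/*
 * Map callback: build an sg_table for the importer. BOs resident in TT
 * memory are mapped through their backing pages, while VRAM placements are
 * exported as peer-to-peer DMA addresses via the VRAM manager. Unpinned BOs
 * are first migrated or validated into a placement the importer can reach.
 */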
static struct sg_table *xe_dma_buf_map(struct dma_buf_attachment *attach,
				       enum dma_data_direction dir)
{
	struct dma_buf *dma_buf = attach->dmabuf;
	struct drm_gem_object *obj = dma_buf->priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);
	struct sg_table *sgt;
	int r = 0;

	if (!attach->peer2peer && !xe_bo_can_migrate(bo, XE_PL_TT))
		return ERR_PTR(-EOPNOTSUPP);

	if (!xe_bo_is_pinned(bo)) {
		if (!attach->peer2peer)
			r = xe_bo_migrate(bo, XE_PL_TT);
		else
			r = xe_bo_validate(bo, NULL, false);
		if (r)
			return ERR_PTR(r);
	}

	switch (bo->ttm.resource->mem_type) {
	case XE_PL_TT:
		sgt = drm_prime_pages_to_sg(obj->dev,
					    bo->ttm.ttm->pages,
					    bo->ttm.ttm->num_pages);
		if (IS_ERR(sgt))
			return sgt;

		if (dma_map_sgtable(attach->dev, sgt, dir,
				    DMA_ATTR_SKIP_CPU_SYNC))
			goto error_free;
		break;

	case XE_PL_VRAM0:
	case XE_PL_VRAM1:
		r = xe_ttm_vram_mgr_alloc_sgt(xe_bo_device(bo),
					      bo->ttm.resource, 0,
					      bo->ttm.base.size, attach->dev,
					      dir, &sgt);
		if (r)
			return ERR_PTR(r);
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	return sgt;

error_free:
	sg_free_table(sgt);
	kfree(sgt);
	return ERR_PTR(-EBUSY);
}

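/*
 * Unmap callback: release a mapping created by xe_dma_buf_map(). An sg_table
 * with struct page backing was built from TT pages and is unmapped and freed
 * here; otherwise it came from the VRAM manager and is returned there.
 */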
static void xe_dma_buf_unmap(struct dma_buf_attachment *attach,
			     struct sg_table *sgt,
			     enum dma_data_direction dir)
{
	if (sg_page(sgt->sgl)) {
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		xe_ttm_vram_mgr_free_sgt(attach->dev, dir, sgt);
	}
}

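/*
 * begin_cpu_access callback: before CPU reads, try to migrate the BO to TT
 * memory so the CPU mapping sees the current contents. CPU writes don't need
 * a migration, and a failed migration is not treated as fatal here.
 */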
static int xe_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
				       enum dma_data_direction direction)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);
	bool reads = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_FROM_DEVICE);

	if (!reads)
		return 0;

	/* Can we do interruptible lock here? */
	xe_bo_lock(bo, false);
	(void)xe_bo_migrate(bo, XE_PL_TT);
	xe_bo_unlock(bo);

	return 0;
}

static const struct dma_buf_ops xe_dmabuf_ops = {
	.attach = xe_dma_buf_attach,
	.detach = xe_dma_buf_detach,
	.pin = xe_dma_buf_pin,
	.unpin = xe_dma_buf_unpin,
	.map_dma_buf = xe_dma_buf_map,
	.unmap_dma_buf = xe_dma_buf_unmap,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = xe_dma_buf_begin_cpu_access,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

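/**
 * xe_gem_prime_export() - Export an xe GEM object as a dma-buf
 * @obj: The GEM object to export.
 * @flags: Flags passed through to drm_gem_prime_export().
 *
 * BOs that belong to a VM cannot be exported. On success the default
 * dma_buf_ops are replaced with &xe_dmabuf_ops.
 *
 * Return: The new dma-buf on success, an ERR_PTR on failure.
 */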
struct dma_buf *xe_gem_prime_export(struct drm_gem_object *obj, int flags)
{
	struct xe_bo *bo = gem_to_xe_bo(obj);
	struct dma_buf *buf;

	if (bo->vm)
		return ERR_PTR(-EPERM);

	buf = drm_gem_prime_export(obj, flags);
	if (!IS_ERR(buf))
		buf->ops = &xe_dmabuf_ops;

	return buf;
}

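/*
 * Create the importer-side BO for a foreign dma-buf. The BO is created as a
 * ttm_bo_type_sg object in system memory and shares the dma-buf's reservation
 * object, which is held across creation.
 */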
static struct drm_gem_object *
xe_dma_buf_init_obj(struct drm_device *dev, struct xe_bo *storage,
		    struct dma_buf *dma_buf)
{
	struct dma_resv *resv = dma_buf->resv;
	struct xe_device *xe = to_xe_device(dev);
	struct xe_bo *bo;
	int ret;

	dma_resv_lock(resv, NULL);
	bo = ___xe_bo_create_locked(xe, storage, NULL, resv, NULL, dma_buf->size,
				    0, /* Will require 1way or 2way for vm_bind */
				    ttm_bo_type_sg, XE_BO_FLAG_SYSTEM);
	if (IS_ERR(bo)) {
		ret = PTR_ERR(bo);
		goto error;
	}
	dma_resv_unlock(resv);

	return &bo->ttm.base;

error:
	dma_resv_unlock(resv);
	return ERR_PTR(ret);
}

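/*
 * move_notify callback for imported dma-bufs: the exporter is about to move
 * the backing storage, so evict the importer-side BO to invalidate any
 * existing mappings of it.
 */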
static void xe_dma_buf_move_notify(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->importer_priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);

	XE_WARN_ON(xe_bo_evict(bo, false));
}

static const struct dma_buf_attach_ops xe_dma_buf_attach_ops = {
	.allow_peer2peer = true,
	.move_notify = xe_dma_buf_move_notify
};

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)

struct dma_buf_test_params {
	struct xe_test_priv base;
	const struct dma_buf_attach_ops *attach_ops;
	bool force_different_devices;
	u32 mem_mask;
};

#define to_dma_buf_test_params(_priv) \
	container_of(_priv, struct dma_buf_test_params, base)
#endif

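/**
 * xe_gem_prime_import() - Import a dma-buf as an xe GEM object
 * @dev: The importing drm device.
 * @dma_buf: The dma-buf to import.
 *
 * dma-bufs exported by the same device are resolved back to their original
 * GEM object. Foreign dma-bufs get a new BO and a dynamic attachment using
 * &xe_dma_buf_attach_ops.
 *
 * Return: The GEM object on success, an ERR_PTR on failure.
 */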
struct drm_gem_object *xe_gem_prime_import(struct drm_device *dev,
					   struct dma_buf *dma_buf)
{
	XE_TEST_DECLARE(struct dma_buf_test_params *test =
			to_dma_buf_test_params
			(xe_cur_kunit_priv(XE_TEST_LIVE_DMA_BUF));)
	const struct dma_buf_attach_ops *attach_ops;
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct xe_bo *bo;

	if (dma_buf->ops == &xe_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev &&
		    !XE_TEST_ONLY(test && test->force_different_devices)) {
			/*
			 * Importing a dmabuf exported from our own gem increases
			 * the refcount on the gem itself instead of the f_count
			 * of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	/*
	 * Don't publish the bo until we have a valid attachment, and a
	 * valid attachment needs the bo address. So pre-create a bo before
	 * creating the attachment and publishing.
	 */
	bo = xe_bo_alloc();
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	attach_ops = &xe_dma_buf_attach_ops;
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
	if (test)
		attach_ops = test->attach_ops;
#endif

	attach = dma_buf_dynamic_attach(dma_buf, dev->dev, attach_ops, &bo->ttm.base);
	if (IS_ERR(attach)) {
		obj = ERR_CAST(attach);
		goto out_err;
	}

	/* Errors here will take care of freeing the bo. */
	obj = xe_dma_buf_init_obj(dev, bo, dma_buf);
	if (IS_ERR(obj))
		return obj;

	get_dma_buf(dma_buf);
	obj->import_attach = attach;
	return obj;

out_err:
	xe_bo_free(bo);

	return obj;
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_dma_buf.c"
#endif