#include "nouveau_bo.h"

#include "drm-uapi/nouveau_drm.h"
#include "util/hash_table.h"
#include "util/u_math.h"

#include <errno.h>
#include <fcntl.h>
#include <stddef.h>
#include <sys/mman.h>
#include <xf86drm.h>

#include "nvidia/classes/cl9097.h"
#include "nvidia/classes/clc597.h"

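/* Unmaps the VA range [offset, offset + range) with a single
 * DRM_NOUVEAU_VM_BIND unmap op.  Requires the VM_BIND uAPI.
 */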
void
nouveau_ws_bo_unbind_vma(struct nouveau_ws_device *dev,
                         uint64_t offset, uint64_t range)
{
   assert(dev->has_vm_bind);

   struct drm_nouveau_vm_bind_op newbindop = {
      .op = DRM_NOUVEAU_VM_BIND_OP_UNMAP,
      .addr = offset,
      .range = range,
   };
   struct drm_nouveau_vm_bind vmbind = {
      .op_count = 1,
      .op_ptr = (uint64_t)(uintptr_t)(void *)&newbindop,
   };
   ASSERTED int ret = drmCommandWriteRead(dev->fd, DRM_NOUVEAU_VM_BIND,
                                          &vmbind, sizeof(vmbind));
   assert(ret == 0);
}

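/* Maps range bytes of bo, starting at bo_offset, at GPU VA addr.  The PTE
 * kind is passed to the kernel in the low bits of the bind op's flags field.
 * Requires the VM_BIND uAPI.
 */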
void
nouveau_ws_bo_bind_vma(struct nouveau_ws_device *dev,
                       struct nouveau_ws_bo *bo,
                       uint64_t addr,
                       uint64_t range,
                       uint64_t bo_offset,
                       uint32_t pte_kind)
{
   assert(dev->has_vm_bind);

   struct drm_nouveau_vm_bind_op newbindop = {
      .op = DRM_NOUVEAU_VM_BIND_OP_MAP,
      .handle = bo->handle,
      .addr = addr,
      .range = range,
      .bo_offset = bo_offset,
      .flags = pte_kind,
   };
   struct drm_nouveau_vm_bind vmbind = {
      .op_count = 1,
      .op_ptr = (uint64_t)(uintptr_t)(void *)&newbindop,
   };
   ASSERTED int ret = drmCommandWriteRead(dev->fd, DRM_NOUVEAU_VM_BIND,
                                          &vmbind, sizeof(vmbind));
   assert(ret == 0);
}

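/* Convenience wrapper that allocates a BO with NOUVEAU_WS_BO_MAP forced on
 * and immediately CPU-maps it, returning the mapping in map_out.
 */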
struct nouveau_ws_bo *
nouveau_ws_bo_new_mapped(struct nouveau_ws_device *dev,
                         uint64_t size, uint64_t align,
                         enum nouveau_ws_bo_flags flags,
                         enum nouveau_ws_bo_map_flags map_flags,
                         void **map_out)
{
   struct nouveau_ws_bo *bo = nouveau_ws_bo_new(dev, size, align,
                                                flags | NOUVEAU_WS_BO_MAP);
   if (!bo)
      return NULL;

   void *map = nouveau_ws_bo_map(bo, map_flags);
   if (map == NULL) {
      nouveau_ws_bo_destroy(bo);
      return NULL;
   }

   *map_out = map;
   return bo;
}

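/* Allocates a GEM object and tracks it in dev->bos.  The caller must hold
 * dev->bos_lock.
 */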
static struct nouveau_ws_bo *
nouveau_ws_bo_new_tiled_locked(struct nouveau_ws_device *dev,
                               uint64_t size, uint64_t align,
                               uint8_t pte_kind, uint16_t tile_mode,
                               enum nouveau_ws_bo_flags flags)
{
   struct drm_nouveau_gem_new req = {};

   /* If the caller doesn't care, use the GPU page size */
   if (align == 0)
      align = 0x1000;

   /* Align the size */
   size = align64(size, align);

   req.info.domain = 0;

   /* It needs to live somewhere */
   assert((flags & NOUVEAU_WS_BO_VRAM) || (flags & NOUVEAU_WS_BO_GART));

   if (flags & NOUVEAU_WS_BO_VRAM)
      req.info.domain |= NOUVEAU_GEM_DOMAIN_VRAM;

   if (flags & NOUVEAU_WS_BO_GART)
      req.info.domain |= NOUVEAU_GEM_DOMAIN_GART;

   if (flags & NOUVEAU_WS_BO_MAP)
      req.info.domain |= NOUVEAU_GEM_DOMAIN_MAPPABLE;

   if (flags & NOUVEAU_WS_BO_NO_SHARE)
      req.info.domain |= NOUVEAU_GEM_DOMAIN_NO_SHARE;

   req.info.tile_flags = (uint32_t)pte_kind << 8;
   req.info.tile_mode = tile_mode;

   req.info.size = size;
   req.align = align;

   int ret = drmCommandWriteRead(dev->fd, DRM_NOUVEAU_GEM_NEW, &req, sizeof(req));
   if (ret != 0)
      return NULL;

   struct nouveau_ws_bo *bo = CALLOC_STRUCT(nouveau_ws_bo);
   if (bo == NULL) {
      /* Don't leak the GEM handle if the allocation fails */
      drmCloseBufferHandle(dev->fd, req.info.handle);
      return NULL;
   }

   bo->size = size;
   bo->handle = req.info.handle;
   bo->map_handle = req.info.map_handle;
   bo->dev = dev;
   bo->flags = flags;
   bo->refcnt = 1;

   _mesa_hash_table_insert(dev->bos, (void *)(uintptr_t)bo->handle, bo);

   return bo;
}

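/* Locking wrapper around nouveau_ws_bo_new_tiled_locked(). */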
struct nouveau_ws_bo *
nouveau_ws_bo_new_tiled(struct nouveau_ws_device *dev,
                        uint64_t size, uint64_t align,
                        uint8_t pte_kind, uint16_t tile_mode,
                        enum nouveau_ws_bo_flags flags)
{
   struct nouveau_ws_bo *bo;

   simple_mtx_lock(&dev->bos_lock);
   bo = nouveau_ws_bo_new_tiled_locked(dev, size, align,
                                       pte_kind, tile_mode, flags);
   simple_mtx_unlock(&dev->bos_lock);

   return bo;
}

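/* Allocates a linear (non-tiled) BO. */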
struct nouveau_ws_bo *
nouveau_ws_bo_new(struct nouveau_ws_device *dev,
                  uint64_t size, uint64_t align,
                  enum nouveau_ws_bo_flags flags)
{
   return nouveau_ws_bo_new_tiled(dev, size, align, 0, 0, flags);
}

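/* Imports a dma-buf fd as a nouveau_ws_bo.  Since drmPrimeFDToHandle()
 * returns the same GEM handle for the same underlying object, we first look
 * the handle up in dev->bos and take a reference on any existing BO rather
 * than creating a duplicate.  The caller must hold dev->bos_lock.
 */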
static struct nouveau_ws_bo *
nouveau_ws_bo_from_dma_buf_locked(struct nouveau_ws_device *dev, int fd)
{
   uint32_t handle;
   int ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
   if (ret != 0)
      return NULL;

   struct hash_entry *entry =
      _mesa_hash_table_search(dev->bos, (void *)(uintptr_t)handle);
   if (entry != NULL) {
      struct nouveau_ws_bo *bo = entry->data;
      nouveau_ws_bo_ref(bo);
      return bo;
   }

   /*
    * If we got here, no BO exists for the retrieved handle. If we error
    * after this point, we need to close the handle.
    */

   struct drm_nouveau_gem_info info = {
      .handle = handle
   };
   ret = drmCommandWriteRead(dev->fd, DRM_NOUVEAU_GEM_INFO,
                             &info, sizeof(info));
   if (ret != 0)
      goto fail_fd_to_handle;

   enum nouveau_ws_bo_flags flags = 0;
   if (info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
      flags |= NOUVEAU_WS_BO_VRAM;
   if (info.domain & NOUVEAU_GEM_DOMAIN_GART)
      flags |= NOUVEAU_WS_BO_GART;
   if (info.map_handle)
      flags |= NOUVEAU_WS_BO_MAP;

   struct nouveau_ws_bo *bo = CALLOC_STRUCT(nouveau_ws_bo);
   if (bo == NULL)
      goto fail_fd_to_handle;

   bo->size = info.size;
   bo->handle = info.handle;
   bo->map_handle = info.map_handle;
   bo->dev = dev;
   bo->flags = flags;
   bo->refcnt = 1;

   uint64_t align = (1ULL << 12);
   if (info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
      align = (1ULL << 16);

   assert(bo->size == align64(bo->size, align));

   _mesa_hash_table_insert(dev->bos, (void *)(uintptr_t)handle, bo);

   return bo;

fail_fd_to_handle:
   drmCloseBufferHandle(dev->fd, handle);

   return NULL;
}

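/* Locking wrapper around nouveau_ws_bo_from_dma_buf_locked(). */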
struct nouveau_ws_bo *
nouveau_ws_bo_from_dma_buf(struct nouveau_ws_device *dev, int fd)
{
   struct nouveau_ws_bo *bo;

   simple_mtx_lock(&dev->bos_lock);
   bo = nouveau_ws_bo_from_dma_buf_locked(dev, fd);
   simple_mtx_unlock(&dev->bos_lock);

   return bo;
}

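/* Atomically decrements counter unless doing so would drop it to zero.
 * Returns true if the counter was decremented.  This lets
 * nouveau_ws_bo_destroy() take dev->bos_lock before dropping the final
 * reference, so freeing a BO can't race with a lookup in dev->bos.
 */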
static bool
atomic_dec_not_one(atomic_uint_fast32_t *counter)
{
   uint_fast32_t old = *counter;
   while (1) {
      assert(old != 0);
      if (old == 1)
         return false;

      if (atomic_compare_exchange_weak(counter, &old, old - 1))
         return true;
   }
}

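/* Drops a reference.  The last reference is released under dev->bos_lock so
 * that removing the handle from dev->bos and closing it is atomic with
 * respect to nouveau_ws_bo_from_dma_buf().
 */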
void
nouveau_ws_bo_destroy(struct nouveau_ws_bo *bo)
{
   if (atomic_dec_not_one(&bo->refcnt))
      return;

   struct nouveau_ws_device *dev = bo->dev;

   /* Lock the device before we drop the final reference */
   simple_mtx_lock(&dev->bos_lock);

   if (--bo->refcnt == 0) {
      _mesa_hash_table_remove_key(dev->bos, (void *)(uintptr_t)bo->handle);

      drmCloseBufferHandle(bo->dev->fd, bo->handle);
      FREE(bo);
   }

   simple_mtx_unlock(&dev->bos_lock);
}

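/* CPU-maps the BO through the kernel-provided mmap offset (map_handle).
 * Returns NULL on failure.
 */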
void *
nouveau_ws_bo_map(struct nouveau_ws_bo *bo,
                  enum nouveau_ws_bo_map_flags flags)
{
   int prot = 0;
   if (flags & NOUVEAU_WS_BO_RD)
      prot |= PROT_READ;
   if (flags & NOUVEAU_WS_BO_WR)
      prot |= PROT_WRITE;

   void *res = mmap(NULL, bo->size, prot, MAP_SHARED,
                    bo->dev->fd, bo->map_handle);
   if (res == MAP_FAILED)
      return NULL;

   return res;
}

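/* Unmaps a CPU mapping created by nouveau_ws_bo_map(). */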
void
nouveau_ws_bo_unmap(struct nouveau_ws_bo *bo, void *ptr)
{
   munmap(ptr, bo->size);
}

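/* Waits until it is safe for the CPU to access the BO with the given flags,
 * using the DRM_NOUVEAU_GEM_CPU_PREP ioctl.  Returns true on success.
 */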
bool
nouveau_ws_bo_wait(struct nouveau_ws_bo *bo, enum nouveau_ws_bo_map_flags flags)
{
   struct drm_nouveau_gem_cpu_prep req = {};

   req.handle = bo->handle;
   if (flags & NOUVEAU_WS_BO_WR)
      req.flags |= NOUVEAU_GEM_CPU_PREP_WRITE;

   return !drmCommandWrite(bo->dev->fd, DRM_NOUVEAU_GEM_CPU_PREP, &req, sizeof(req));
}

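/* Exports the BO as a dma-buf fd.  Returns 0 on success. */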
int
nouveau_ws_bo_dma_buf(struct nouveau_ws_bo *bo, int *fd)
{
   return drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC, fd);
}