/*
 * Copyright © 2024 Collabora Ltd. and Red Hat Inc.
 * SPDX-License-Identifier: MIT
 */

#include "nvkmd.h"
#include "nouveau/nvkmd_nouveau.h"

#include <inttypes.h>

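/*
 * Initialize the driver-independent parts of an nvkmd_mem.  The back-end
 * owns the rest of the struct; this only fills in the common fields (ops,
 * dev, flags, sizes) and starts the reference count at 1.
 */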
void
nvkmd_mem_init(struct nvkmd_dev *dev,
               struct nvkmd_mem *mem,
               const struct nvkmd_mem_ops *ops,
               enum nvkmd_mem_flags flags,
               uint64_t size_B,
               uint32_t bind_align_B)
{
   *mem = (struct nvkmd_mem) {
      .ops = ops,
      .dev = dev,
      .refcnt = 1,
      .flags = flags,
      .bind_align_B = bind_align_B,
      .size_B = size_B,
   };

   simple_mtx_init(&mem->map_mutex, mtx_plain);
}

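/*
 * Try to create an nvkmd_pdev for the given DRM device.  For now this just
 * dispatches to the nouveau back-end, which is the only one.
 */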
VkResult
nvkmd_try_create_pdev_for_drm(struct _drmDevice *drm_device,
                              struct vk_object_base *log_obj,
                              enum nvk_debug debug_flags,
                              struct nvkmd_pdev **pdev_out)
{
   return nvkmd_nouveau_try_create_pdev(drm_device, log_obj,
                                        debug_flags, pdev_out);
}

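/*
 * Allocate memory and map it in one step.  NVKMD_MEM_CAN_MAP is added to
 * the requested flags automatically, and the allocation is freed again if
 * the map fails, so the caller either gets a mapped nvkmd_mem or nothing.
 * NVKMD_MEM_MAP_FIXED is not allowed since no fixed address is passed.
 *
 * A minimal usage sketch; dev, log_obj, mem_flags, and map_flags stand in
 * for hypothetical caller-provided values (map_flags without
 * NVKMD_MEM_MAP_CLIENT, so the mapping lands in mem->map):
 *
 *    struct nvkmd_mem *mem;
 *    VkResult result =
 *       nvkmd_dev_alloc_mapped_mem(dev, log_obj, 4096, 4096,
 *                                  mem_flags, map_flags, &mem);
 *    if (result == VK_SUCCESS) {
 *       memset(mem->map, 0, 4096);
 *       nvkmd_mem_unref(mem);  // also tears down any remaining map
 *    }
 */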
VkResult
nvkmd_dev_alloc_mapped_mem(struct nvkmd_dev *dev,
                           struct vk_object_base *log_obj,
                           uint64_t size_B, uint64_t align_B,
                           enum nvkmd_mem_flags flags,
                           enum nvkmd_mem_map_flags map_flags,
                           struct nvkmd_mem **mem_out)
{
   struct nvkmd_mem *mem;
   VkResult result;

   result = nvkmd_dev_alloc_mem(dev, log_obj, size_B, align_B,
                                flags | NVKMD_MEM_CAN_MAP, &mem);
   if (result != VK_SUCCESS)
      return result;

   assert(!(map_flags & NVKMD_MEM_MAP_FIXED));
   result = nvkmd_mem_map(mem, log_obj, map_flags, NULL, NULL);
   if (result != VK_SUCCESS) {
      mem->ops->free(mem);
      return result;
   }

   *mem_out = mem;

   return VK_SUCCESS;
}

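/*
 * Allocate a range of GPU virtual address space.  The actual allocation is
 * done by the back-end's alloc_va op; this wrapper only adds NVK_DEBUG_VM
 * logging of the resulting [addr, addr + size_B) range.
 */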
VkResult MUST_CHECK
nvkmd_dev_alloc_va(struct nvkmd_dev *dev,
                   struct vk_object_base *log_obj,
                   enum nvkmd_va_flags flags, uint8_t pte_kind,
                   uint64_t size_B, uint64_t align_B,
                   uint64_t fixed_addr, struct nvkmd_va **va_out)
{
   VkResult result = dev->ops->alloc_va(dev, log_obj, flags, pte_kind,
                                        size_B, align_B, fixed_addr, va_out);
   if (result != VK_SUCCESS)
      return result;

   if (unlikely(dev->pdev->debug_flags & NVK_DEBUG_VM)) {
      const char *sparse = (flags & NVKMD_VA_SPARSE) ? " sparse" : "";
      fprintf(stderr, "alloc va [0x%" PRIx64 ", 0x%" PRIx64 ")%s\n",
              (*va_out)->addr, (*va_out)->addr + size_B, sparse);
   }

   return VK_SUCCESS;
}

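/*
 * Free a VA range, mirroring nvkmd_dev_alloc_va: log the range when
 * NVK_DEBUG_VM is set, then hand the actual teardown to the back-end.
 */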
void
nvkmd_va_free(struct nvkmd_va *va)
{
   if (unlikely(va->dev->pdev->debug_flags & NVK_DEBUG_VM)) {
      const char *sparse = (va->flags & NVKMD_VA_SPARSE) ? " sparse" : "";
      fprintf(stderr, "free va [0x%" PRIx64 ", 0x%" PRIx64 ")%s\n",
              va->addr, va->addr + va->size_B, sparse);
   }

   va->ops->free(va);
}

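/* NVK_DEBUG_VM helper: log a mem range being bound into a VA range. */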
static inline void
log_va_bind_mem(struct nvkmd_va *va,
                uint64_t va_offset_B,
                struct nvkmd_mem *mem,
                uint64_t mem_offset_B,
                uint64_t range_B)
{
   fprintf(stderr, "bind vma mem<0x%" PRIx32 ">"
                   "[0x%" PRIx64 ", 0x%" PRIx64 ") to "
                   "[0x%" PRIx64 ", 0x%" PRIx64 ")\n",
           mem->ops->log_handle(mem),
           mem_offset_B, mem_offset_B + range_B,
           va->addr + va_offset_B, va->addr + va_offset_B + range_B);
}

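/* NVK_DEBUG_VM helper: log a VA range being unbound. */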
static inline void
log_va_unbind(struct nvkmd_va *va,
              uint64_t va_offset_B,
              uint64_t range_B)
{
   fprintf(stderr, "unbind vma [0x%" PRIx64 ", 0x%" PRIx64 ")\n",
           va->addr + va_offset_B, va->addr + va_offset_B + range_B);
}

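/*
 * Bind [mem_offset_B, mem_offset_B + range_B) of mem at offset va_offset_B
 * into the VA range.  Both ranges must lie within their objects, and every
 * address, offset, and size involved must be a multiple of
 * mem->bind_align_B; the asserts below spell those requirements out.
 */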
VkResult MUST_CHECK
nvkmd_va_bind_mem(struct nvkmd_va *va,
                  struct vk_object_base *log_obj,
                  uint64_t va_offset_B,
                  struct nvkmd_mem *mem,
                  uint64_t mem_offset_B,
                  uint64_t range_B)
{
   assert(va_offset_B <= va->size_B);
   assert(va_offset_B + range_B <= va->size_B);
   assert(mem_offset_B <= mem->size_B);
   assert(mem_offset_B + range_B <= mem->size_B);

   assert(va->addr % mem->bind_align_B == 0);
   assert(va_offset_B % mem->bind_align_B == 0);
   assert(mem_offset_B % mem->bind_align_B == 0);
   assert(range_B % mem->bind_align_B == 0);

   if (unlikely(va->dev->pdev->debug_flags & NVK_DEBUG_VM))
      log_va_bind_mem(va, va_offset_B, mem, mem_offset_B, range_B);

   return va->ops->bind_mem(va, log_obj, va_offset_B,
                            mem, mem_offset_B, range_B);
}

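/*
 * Unbind whatever memory is currently bound to [va_offset_B,
 * va_offset_B + range_B) of the VA range.  The VA itself stays allocated
 * and can be bound again later.
 */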
VkResult MUST_CHECK
nvkmd_va_unbind(struct nvkmd_va *va,
                struct vk_object_base *log_obj,
                uint64_t va_offset_B,
                uint64_t range_B)
{
   assert(va_offset_B <= va->size_B);
   assert(va_offset_B + range_B <= va->size_B);

   if (unlikely(va->dev->pdev->debug_flags & NVK_DEBUG_VM))
      log_va_unbind(va, va_offset_B, range_B);

   return va->ops->unbind(va, log_obj, va_offset_B, range_B);
}

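/*
 * Submit a batch of bind/unbind operations on a context.  Each entry is
 * validated with the same range and alignment rules as nvkmd_va_bind_mem
 * and nvkmd_va_unbind; entries whose op is not NVKMD_BIND_OP_BIND must
 * carry mem == NULL.
 */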
VkResult MUST_CHECK
nvkmd_ctx_bind(struct nvkmd_ctx *ctx,
               struct vk_object_base *log_obj,
               uint32_t bind_count,
               const struct nvkmd_ctx_bind *binds)
{
   for (uint32_t i = 0; i < bind_count; i++) {
      assert(binds[i].va_offset_B <= binds[i].va->size_B);
      assert(binds[i].va_offset_B + binds[i].range_B <= binds[i].va->size_B);
      if (binds[i].op == NVKMD_BIND_OP_BIND) {
         assert(binds[i].mem_offset_B <= binds[i].mem->size_B);
         assert(binds[i].mem_offset_B + binds[i].range_B <=
                binds[i].mem->size_B);

         assert(binds[i].va->addr % binds[i].mem->bind_align_B == 0);
         assert(binds[i].va_offset_B % binds[i].mem->bind_align_B == 0);
         assert(binds[i].mem_offset_B % binds[i].mem->bind_align_B == 0);
         assert(binds[i].range_B % binds[i].mem->bind_align_B == 0);
      } else {
         assert(binds[i].mem == NULL);
      }
   }

   if (unlikely(ctx->dev->pdev->debug_flags & NVK_DEBUG_VM)) {
      for (uint32_t i = 0; i < bind_count; i++) {
         if (binds[i].op == NVKMD_BIND_OP_BIND) {
            log_va_bind_mem(binds[i].va, binds[i].va_offset_B,
                            binds[i].mem, binds[i].mem_offset_B,
                            binds[i].range_B);
         } else {
            log_va_unbind(binds[i].va, binds[i].va_offset_B, binds[i].range_B);
         }
      }
   }

   return ctx->ops->bind(ctx, log_obj, bind_count, binds);
}

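/*
 * Drop a reference.  When the count hits zero, any remaining client and
 * internal mappings are torn down before the memory itself is freed.
 */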
void
nvkmd_mem_unref(struct nvkmd_mem *mem)
{
   assert(p_atomic_read(&mem->refcnt) > 0);
   if (!p_atomic_dec_zero(&mem->refcnt))
      return;

   if (mem->client_map != NULL)
      mem->ops->unmap(mem, NVKMD_MEM_MAP_CLIENT, mem->client_map);

   if (mem->map != NULL)
      mem->ops->unmap(mem, 0, mem->map);

   mem->ops->free(mem);
}

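/*
 * Map the memory object.  A NVKMD_MEM_MAP_CLIENT map is the single,
 * optionally fixed-address mapping handed out to the Vulkan client.
 * Internal (non-client) maps are reference-counted under map_mutex and all
 * share one mapping, which is why NVKMD_MEM_MAP_FIXED is not allowed for
 * them.
 */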
VkResult
nvkmd_mem_map(struct nvkmd_mem *mem, struct vk_object_base *log_obj,
              enum nvkmd_mem_map_flags flags, void *fixed_addr,
              void **map_out)
{
   void *map = NULL;

   assert((fixed_addr == NULL) == !(flags & NVKMD_MEM_MAP_FIXED));

   if (flags & NVKMD_MEM_MAP_CLIENT) {
      assert(mem->client_map == NULL);

      VkResult result = mem->ops->map(mem, log_obj, flags, fixed_addr, &map);
      if (result != VK_SUCCESS)
         return result;

      mem->client_map = map;
   } else {
      assert(!(flags & NVKMD_MEM_MAP_FIXED));

      simple_mtx_lock(&mem->map_mutex);

      assert((mem->map_cnt == 0) == (mem->map == NULL));

      VkResult result = VK_SUCCESS;
      if (mem->map == NULL) {
         result = mem->ops->map(mem, log_obj, flags, NULL, &map);
         if (result == VK_SUCCESS)
            mem->map = map;
      } else {
         map = mem->map;
      }

      /* Only count the reference once the mapping is known to exist so a
       * failed map doesn't leave map_cnt out of sync with map.
       */
      if (result == VK_SUCCESS)
         mem->map_cnt++;

      simple_mtx_unlock(&mem->map_mutex);

      if (result != VK_SUCCESS)
         return result;
   }

   if (map_out != NULL)
      *map_out = map;

   return VK_SUCCESS;
}

266 
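/*
 * Undo a nvkmd_mem_map.  Client maps are dropped immediately; internal maps
 * only go away once the last internal reference is unmapped.
 */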
void
nvkmd_mem_unmap(struct nvkmd_mem *mem, enum nvkmd_mem_map_flags flags)
{
   if (flags & NVKMD_MEM_MAP_CLIENT) {
      assert(mem->client_map != NULL);
      mem->ops->unmap(mem, flags, mem->client_map);
      mem->client_map = NULL;
   } else {
      assert(mem->map != NULL);
      simple_mtx_lock(&mem->map_mutex);
      if (--mem->map_cnt == 0) {
         mem->ops->unmap(mem, flags, mem->map);
         mem->map = NULL;
      }
      simple_mtx_unlock(&mem->map_mutex);
   }
}