/*
 * Copyright © 2022 Imagination Technologies Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <vulkan/vulkan.h>
#include <xf86drm.h>

#include "pvr_private.h"
#include "pvr_types.h"
#include "pvr_winsys.h"
#include "pvr_winsys_helper.h"
#include "util/u_atomic.h"
#include "vk_log.h"

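/* Creates a DRM "dumb" display buffer of at least `size` bytes on the display
 * fd. The size is encoded as a width of `size` pixels with height 1 and 8 bits
 * per pixel. On success the GEM handle is returned through `handle_out`.
 */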
VkResult pvr_winsys_helper_display_buffer_create(struct pvr_winsys *const ws,
                                                 uint64_t size,
                                                 uint32_t *const handle_out)
{
   struct drm_mode_create_dumb args = {
      .width = size,
      .height = 1,
      .bpp = 8,
   };
   VkResult result;

   result = pvr_ioctl(ws->display_fd,
                      DRM_IOCTL_MODE_CREATE_DUMB,
                      &args,
                      VK_ERROR_OUT_OF_DEVICE_MEMORY);
   if (result != VK_SUCCESS)
      return result;

   *handle_out = args.handle;

   return VK_SUCCESS;
}

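/* Destroys a dumb buffer previously created with
 * pvr_winsys_helper_display_buffer_create().
 */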
VkResult pvr_winsys_helper_display_buffer_destroy(struct pvr_winsys *ws,
                                                  uint32_t handle)
{
   struct drm_mode_destroy_dumb args = {
      .handle = handle,
   };

   return pvr_ioctl(ws->display_fd,
                    DRM_IOCTL_MODE_DESTROY_DUMB,
                    &args,
                    VK_ERROR_UNKNOWN);
}

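/* Tears down a winsys heap. Returns false, without touching the heap, if any
 * allocations made from it are still outstanding; otherwise destroys the heap
 * lock and the underlying VMA allocator and returns true.
 */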
bool pvr_winsys_helper_winsys_heap_finish(struct pvr_winsys_heap *const heap)
{
   if (p_atomic_read(&heap->ref_count) != 0)
      return false;

   pthread_mutex_destroy(&heap->lock);
   util_vma_heap_finish(&heap->vma_heap);

   return true;
}

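/* Reserves a device-virtual range of at least `size` bytes from the heap.
 * Size and alignment are rounded up to the heap page size so that a later
 * page-aligned buffer mapping is guaranteed to fit. The returned vma is not
 * yet backed by a buffer object; release it with
 * pvr_winsys_helper_heap_free().
 */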
VkResult pvr_winsys_helper_heap_alloc(struct pvr_winsys_heap *const heap,
                                      uint64_t size,
                                      uint64_t alignment,
                                      struct pvr_winsys_vma *const vma_out)
{
   struct pvr_winsys_vma vma = {
      .heap = heap,
   };

   assert(util_is_power_of_two_nonzero64(alignment));

   /* pvr_srv_winsys_buffer_create() page aligns the size. We must do the same
    * here to ensure enough heap space is allocated to be able to map the
    * buffer to the GPU.
    * We have to do this for the powervr kernel mode driver as well, as it
    * returns a page aligned size when allocating buffers.
    */
   alignment = MAX2(alignment, heap->page_size);

   size = ALIGN_POT(size, alignment);
   vma.size = size;

   pthread_mutex_lock(&heap->lock);
   vma.dev_addr =
      PVR_DEV_ADDR(util_vma_heap_alloc(&heap->vma_heap, size, heap->page_size));
   pthread_mutex_unlock(&heap->lock);

   if (!vma.dev_addr.addr)
      return vk_error(NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   p_atomic_inc(&heap->ref_count);

   *vma_out = vma;

   return VK_SUCCESS;
}

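/* Returns the vma's device-virtual range to its heap and drops the heap
 * reference taken at allocation time. The vma must already be unmapped.
 */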
void pvr_winsys_helper_heap_free(struct pvr_winsys_vma *const vma)
{
   struct pvr_winsys_heap *const heap = vma->heap;

   /* A vma with an existing device mapping should not be freed. */
   assert(!vma->bo);

   pthread_mutex_lock(&heap->lock);
   util_vma_heap_free(&heap->vma_heap, vma->dev_addr.addr, vma->size);
   pthread_mutex_unlock(&heap->lock);

   p_atomic_dec(&heap->ref_count);
}

/* Note: this function assumes that an allocation from the carveout memory
 * area can be freed with the regular heap free function; that free function
 * is called if mapping fails.
 */
static VkResult
pvr_buffer_create_and_map(struct pvr_winsys *const ws,
                          heap_alloc_carveout_func heap_alloc_carveout,
                          struct pvr_winsys_heap *heap,
                          pvr_dev_addr_t dev_addr,
                          uint64_t size,
                          uint64_t alignment,
                          struct pvr_winsys_vma **const vma_out)
{
   struct pvr_winsys_vma *vma;
   struct pvr_winsys_bo *bo;
   VkResult result;

   /* The address must not be NULL: this function is used to allocate and map
    * carveout addresses and is only supposed to be used internally.
    */
   assert(dev_addr.addr);

   result = ws->ops->buffer_create(ws,
                                   size,
                                   alignment,
                                   PVR_WINSYS_BO_TYPE_GPU,
                                   PVR_WINSYS_BO_FLAG_CPU_ACCESS,
                                   &bo);
   if (result != VK_SUCCESS)
      goto err_out;

   result = heap_alloc_carveout(heap, dev_addr, size, alignment, &vma);
   if (result != VK_SUCCESS)
      goto err_pvr_winsys_buffer_destroy;

   result = ws->ops->vma_map(vma, bo, 0, size, NULL);
   if (result != VK_SUCCESS)
      goto err_pvr_winsys_heap_free;

   /* Note: this won't destroy the bo while it is still mapped by the vma;
    * once the vma is unmapped the bo will be destroyed automatically.
    */
   ws->ops->buffer_destroy(bo);

   *vma_out = vma;

   return VK_SUCCESS;

err_pvr_winsys_heap_free:
   ws->ops->heap_free(vma);

err_pvr_winsys_buffer_destroy:
   ws->ops->buffer_destroy(bo);

err_out:
   return result;
}

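/* Unmaps a vma set up by pvr_buffer_create_and_map() and releases its
 * device-virtual range. Since the buffer object's creation reference was
 * already dropped, unmapping also destroys the bo.
 */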
static inline void pvr_buffer_destroy_and_unmap(struct pvr_winsys_vma *vma)
{
   const struct pvr_winsys *const ws = vma->heap->ws;

   /* Buffer object associated with the vma will be automatically destroyed
    * once vma is unmapped.
    */
   ws->ops->vma_unmap(vma);
   ws->ops->heap_free(vma);
}

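/* Allocates and maps the static data carveouts of the general, PDS and USC
 * heaps using the winsys-specific carveout allocator. On success the three
 * mapped vmas are returned; on failure anything allocated so far is undone.
 */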
VkResult pvr_winsys_helper_allocate_static_memory(
   struct pvr_winsys *const ws,
   heap_alloc_carveout_func heap_alloc_carveout,
   struct pvr_winsys_heap *const general_heap,
   struct pvr_winsys_heap *const pds_heap,
   struct pvr_winsys_heap *const usc_heap,
   struct pvr_winsys_vma **const general_vma_out,
   struct pvr_winsys_vma **const pds_vma_out,
   struct pvr_winsys_vma **const usc_vma_out)
{
   struct pvr_winsys_vma *general_vma;
   struct pvr_winsys_vma *pds_vma;
   struct pvr_winsys_vma *usc_vma;
   VkResult result;

   result = pvr_buffer_create_and_map(ws,
                                      heap_alloc_carveout,
                                      general_heap,
                                      general_heap->static_data_carveout_addr,
                                      general_heap->static_data_carveout_size,
                                      general_heap->page_size,
                                      &general_vma);
   if (result != VK_SUCCESS)
      goto err_out;

   result = pvr_buffer_create_and_map(ws,
                                      heap_alloc_carveout,
                                      pds_heap,
                                      pds_heap->static_data_carveout_addr,
                                      pds_heap->static_data_carveout_size,
                                      pds_heap->page_size,
                                      &pds_vma);
   if (result != VK_SUCCESS)
      goto err_pvr_buffer_destroy_and_unmap_general;

   result = pvr_buffer_create_and_map(ws,
                                      heap_alloc_carveout,
                                      usc_heap,
                                      usc_heap->static_data_carveout_addr,
                                      usc_heap->static_data_carveout_size,
                                      usc_heap->page_size,
                                      &usc_vma);
   if (result != VK_SUCCESS)
      goto err_pvr_buffer_destroy_and_unmap_pds;

   *general_vma_out = general_vma;
   *pds_vma_out = pds_vma;
   *usc_vma_out = usc_vma;

   return VK_SUCCESS;

err_pvr_buffer_destroy_and_unmap_pds:
   pvr_buffer_destroy_and_unmap(pds_vma);

err_pvr_buffer_destroy_and_unmap_general:
   pvr_buffer_destroy_and_unmap(general_vma);

err_out:
   return result;
}

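/* Releases the static data carveout mappings created by
 * pvr_winsys_helper_allocate_static_memory(), in reverse allocation order.
 */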
void pvr_winsys_helper_free_static_memory(
   struct pvr_winsys_vma *const general_vma,
   struct pvr_winsys_vma *const pds_vma,
   struct pvr_winsys_vma *const usc_vma)
{
   pvr_buffer_destroy_and_unmap(usc_vma);
   pvr_buffer_destroy_and_unmap(pds_vma);
   pvr_buffer_destroy_and_unmap(general_vma);
}

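/* Copies the hard-coded USC sync-task code into the USC static data area and
 * generates the PDS program (code and data segments) that kicks it, writing it
 * into the PDS static data area at the given offset.
 */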
static void pvr_setup_static_vdm_sync(uint8_t *const pds_ptr,
                                      uint64_t pds_sync_offset_in_bytes,
                                      uint8_t *const usc_ptr,
                                      uint64_t usc_sync_offset_in_bytes)
{
   /* TODO: this needs to be auto-generated */
   const uint8_t state_update[] = { 0x44, 0xA0, 0x80, 0x05,
                                    0x00, 0x00, 0x00, 0xFF };

   struct pvr_pds_kickusc_program ppp_state_update_program = { 0 };

   memcpy(usc_ptr + usc_sync_offset_in_bytes,
          state_update,
          sizeof(state_update));

   pvr_pds_setup_doutu(&ppp_state_update_program.usc_task_control,
                       usc_sync_offset_in_bytes,
                       0,
                       PVRX(PDSINST_DOUTU_SAMPLE_RATE_INSTANCE),
                       false);

   pvr_pds_kick_usc(&ppp_state_update_program,
                    (uint32_t *)&pds_ptr[pds_sync_offset_in_bytes],
                    0,
                    false,
                    PDS_GENERATE_CODEDATA_SEGMENTS);
}

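/* Generates the code segment of the static PDS pixel event (EOT) program into
 * the PDS static data area at the given offset.
 */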
static void
pvr_setup_static_pixel_event_program(uint8_t *const pds_ptr,
                                     uint64_t pds_eot_offset_in_bytes)
{
   struct pvr_pds_event_program pixel_event_program = { 0 };

   pvr_pds_generate_pixel_event(&pixel_event_program,
                                (uint32_t *)&pds_ptr[pds_eot_offset_in_bytes],
                                PDS_GENERATE_CODE_SEGMENT,
                                NULL);
}

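/* Maps the static data buffers for CPU access, writes the static VDM sync and
 * pixel event programs into the PDS and USC static data areas at the heaps'
 * recorded offsets, and unmaps everything again.
 */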
VkResult
pvr_winsys_helper_fill_static_memory(struct pvr_winsys *const ws,
                                     struct pvr_winsys_vma *const general_vma,
                                     struct pvr_winsys_vma *const pds_vma,
                                     struct pvr_winsys_vma *const usc_vma)
{
   VkResult result;

   result = ws->ops->buffer_map(general_vma->bo);
   if (result != VK_SUCCESS)
      goto err_out;

   result = ws->ops->buffer_map(pds_vma->bo);
   if (result != VK_SUCCESS)
      goto err_pvr_srv_winsys_buffer_unmap_general;

   result = ws->ops->buffer_map(usc_vma->bo);
   if (result != VK_SUCCESS)
      goto err_pvr_srv_winsys_buffer_unmap_pds;

   pvr_setup_static_vdm_sync(pds_vma->bo->map,
                             pds_vma->heap->static_data_offsets.vdm_sync,
                             usc_vma->bo->map,
                             usc_vma->heap->static_data_offsets.vdm_sync);

   pvr_setup_static_pixel_event_program(pds_vma->bo->map,
                                        pds_vma->heap->static_data_offsets.eot);

   ws->ops->buffer_unmap(usc_vma->bo);
   ws->ops->buffer_unmap(pds_vma->bo);
   ws->ops->buffer_unmap(general_vma->bo);

   return VK_SUCCESS;

err_pvr_srv_winsys_buffer_unmap_pds:
   ws->ops->buffer_unmap(pds_vma->bo);

err_pvr_srv_winsys_buffer_unmap_general:
   ws->ops->buffer_unmap(general_vma->bo);

err_out:
   return result;
}