/*
 * Copyright © 2021 Collabora Ltd.
 * SPDX-License-Identifier: MIT
 */

#include <assert.h>

#include "vk_alloc.h"

#include "panvk_device.h"
#include "panvk_priv_bo.h"

#include "kmod/pan_kmod.h"

#include "genxml/decode.h"

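/* Allocate a driver-internal BO: allocate a kmod BO, CPU-map it unless
 * PAN_KMOD_BO_FLAG_NO_MMAP is set, and map it in the device GPU VM.
 * Returns NULL on failure.
 */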
struct panvk_priv_bo *
panvk_priv_bo_create(struct panvk_device *dev, size_t size, uint32_t flags,
                     VkSystemAllocationScope scope)
{
   int ret;
   struct panvk_priv_bo *priv_bo =
      vk_zalloc(&dev->vk.alloc, sizeof(*priv_bo), 8, scope);

   if (!priv_bo)
      return NULL;

   struct pan_kmod_bo *bo =
      pan_kmod_bo_alloc(dev->kmod.dev, dev->kmod.vm, size, flags);
   if (!bo)
      goto err_free_priv_bo;

   priv_bo->bo = bo;
   priv_bo->dev = dev;

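   /* CPU-map the BO unless the caller explicitly opted out. */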
   if (!(flags & PAN_KMOD_BO_FLAG_NO_MMAP)) {
      priv_bo->addr.host = pan_kmod_bo_mmap(
         bo, 0, pan_kmod_bo_size(bo), PROT_READ | PROT_WRITE, MAP_SHARED, NULL);
      if (priv_bo->addr.host == MAP_FAILED)
         goto err_put_bo;
   }

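   /* Map the whole BO in the GPU VM. Default to a kernel-assigned VA; if the
    * kernel doesn't do VA assignment, we pick one from the device VA heap
    * below.
    */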
   struct pan_kmod_vm_op op = {
      .type = PAN_KMOD_VM_OP_TYPE_MAP,
      .va = {
         .start = PAN_KMOD_VM_MAP_AUTO_VA,
         .size = pan_kmod_bo_size(bo),
      },
      .map = {
         .bo = priv_bo->bo,
         .bo_offset = 0,
      },
   };

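   /* No VA assignment from the kernel: allocate a device VA range ourselves,
    * aligning BOs bigger than 2 MiB on a 2 MiB boundary and everything else
    * on a page (4 KiB) boundary.
    */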
   if (!(dev->kmod.vm->flags & PAN_KMOD_VM_FLAG_AUTO_VA)) {
      op.va.start = util_vma_heap_alloc(
         &dev->as.heap, op.va.size, op.va.size > 0x200000 ? 0x200000 : 0x1000);
      if (!op.va.start)
         goto err_munmap_bo;
   }

   ret = pan_kmod_vm_bind(dev->kmod.vm, PAN_KMOD_VM_OP_MODE_IMMEDIATE, &op, 1);
   if (ret)
      goto err_free_va;

   priv_bo->addr.dev = op.va.start;

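   /* Register the new mapping with the decode context so jobs referencing
    * this BO can be decoded when debugging is enabled.
    */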
   if (dev->debug.decode_ctx) {
      pandecode_inject_mmap(dev->debug.decode_ctx, priv_bo->addr.dev,
                            priv_bo->addr.host, pan_kmod_bo_size(priv_bo->bo),
                            NULL);
   }

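   /* The caller owns the initial reference. */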
   p_atomic_set(&priv_bo->refcnt, 1);

   return priv_bo;

err_free_va:
   /* Return the device VA range to the heap if we allocated it ourselves. */
   if (!(dev->kmod.vm->flags & PAN_KMOD_VM_FLAG_AUTO_VA))
      util_vma_heap_free(&dev->as.heap, op.va.start, op.va.size);

err_munmap_bo:
   if (priv_bo->addr.host) {
      ret = os_munmap(priv_bo->addr.host, pan_kmod_bo_size(bo));
      assert(!ret);
   }

err_put_bo:
   pan_kmod_bo_put(bo);

err_free_priv_bo:
   vk_free(&dev->vk.alloc, priv_bo);
   return NULL;
}

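/* Tear down a private BO: notify the decode context, unmap the BO from the
 * GPU VM, release its VA range and CPU mapping, and drop the kmod BO
 * reference.
 */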
static void
panvk_priv_bo_destroy(struct panvk_priv_bo *priv_bo)
{
   struct panvk_device *dev = priv_bo->dev;

   if (dev->debug.decode_ctx) {
      pandecode_inject_free(dev->debug.decode_ctx, priv_bo->addr.dev,
                            pan_kmod_bo_size(priv_bo->bo));
   }

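   /* Unmap the BO from the GPU VM before releasing its resources. */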
   struct pan_kmod_vm_op op = {
      .type = PAN_KMOD_VM_OP_TYPE_UNMAP,
      .va = {
         .start = priv_bo->addr.dev,
         .size = pan_kmod_bo_size(priv_bo->bo),
      },
   };
   ASSERTED int ret =
      pan_kmod_vm_bind(dev->kmod.vm, PAN_KMOD_VM_OP_MODE_IMMEDIATE, &op, 1);
   assert(!ret);

   if (!(dev->kmod.vm->flags & PAN_KMOD_VM_FLAG_AUTO_VA))
      util_vma_heap_free(&dev->as.heap, op.va.start, op.va.size);

   if (priv_bo->addr.host) {
      ret = os_munmap(priv_bo->addr.host, pan_kmod_bo_size(priv_bo->bo));
      assert(!ret);
   }

   pan_kmod_bo_put(priv_bo->bo);
   vk_free(&dev->vk.alloc, priv_bo);
}

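/* Drop a reference to a private BO. The BO is destroyed when the last
 * reference is released. NULL is accepted and is a no-op.
 */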
void
panvk_priv_bo_unref(struct panvk_priv_bo *priv_bo)
{
   if (!priv_bo || p_atomic_dec_return(&priv_bo->refcnt))
      return;

   panvk_priv_bo_destroy(priv_bo);
}