/*
 * Copyright © 2023 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "xe/anv_device.h"
#include "anv_private.h"

#include "drm-uapi/gpu_scheduler.h"
#include "drm-uapi/xe_drm.h"

#include "common/xe/intel_device_query.h"

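/* Tear down the device's Xe VM: finish the bind timeline and destroy the VM
 * created in anv_xe_device_setup_vm(). Returns true on success.
 */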
bool anv_xe_device_destroy_vm(struct anv_device *device)
{
   struct drm_xe_vm_destroy destroy = {
      .vm_id = device->vm_id,
   };

   intel_bind_timeline_finish(&device->bind_timeline, device->fd);

   return intel_ioctl(device->fd, DRM_IOCTL_XE_VM_DESTROY, &destroy) == 0;
}

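/* Create the Xe VM used by this logical device (requesting a scratch page
 * via DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE) and initialize the bind timeline
 * used to order binds on it.
 */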
VkResult anv_xe_device_setup_vm(struct anv_device *device)
{
   struct drm_xe_vm_create create = {
      .flags = DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE,
   };
   if (intel_ioctl(device->fd, DRM_IOCTL_XE_VM_CREATE, &create) != 0)
      return vk_errorf(device, VK_ERROR_INITIALIZATION_FAILED,
                       "vm creation failed");

   device->vm_id = create.vm_id;

   if (!intel_bind_timeline_init(&device->bind_timeline, device->fd)) {
      anv_xe_device_destroy_vm(device);
      return vk_errorf(device, VK_ERROR_INITIALIZATION_FAILED,
                       "intel_bind_timeline_init failed");
   }

   return VK_SUCCESS;
}

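/* Translate a DRM scheduler priority into the corresponding
 * VkQueueGlobalPriorityKHR value.
 */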
static VkQueueGlobalPriorityKHR
drm_sched_priority_to_vk_priority(enum drm_sched_priority drm_sched_priority)
{
   switch (drm_sched_priority) {
   case DRM_SCHED_PRIORITY_MIN:
      return VK_QUEUE_GLOBAL_PRIORITY_LOW_KHR;
   case DRM_SCHED_PRIORITY_NORMAL:
      return VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR;
   case DRM_SCHED_PRIORITY_HIGH:
      return VK_QUEUE_GLOBAL_PRIORITY_HIGH_KHR;
   default:
      unreachable("Invalid drm_sched_priority");
      return VK_QUEUE_GLOBAL_PRIORITY_LOW_KHR;
   }
}

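/* Query the Xe device config and fill in the physical-device parameters
 * derived from it: timeline/VM-control support and the maximum exec queue
 * priority, exposed as a Vulkan global priority.
 */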
VkResult
anv_xe_physical_device_get_parameters(struct anv_physical_device *device)
{
   struct drm_xe_query_config *config;

   config = xe_device_query_alloc_fetch(device->local_fd, DRM_XE_DEVICE_QUERY_CONFIG, NULL);
   if (!config)
      return vk_errorf(device, VK_ERROR_INITIALIZATION_FAILED,
                       "unable to query device config");

   device->has_exec_timeline = true;
   device->has_vm_control = true;
   device->max_context_priority =
         drm_sched_priority_to_vk_priority(config->info[DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY]);

   free(config);
   return VK_SUCCESS;
}

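/* Populate the Vulkan memory types advertised by the physical device.
 * Discrete (VRAM) devices get device-local, host-visible and combined
 * types; integrated devices get one or two host-visible types depending on
 * whether LLC is shared with the CPU. On graphics version 20+ an additional
 * compressed device-local type is exposed unless CCS is disabled via
 * INTEL_DEBUG.
 */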
VkResult
anv_xe_physical_device_init_memory_types(struct anv_physical_device *device)
{
   if (anv_physical_device_has_vram(device)) {
      if (device->info.ver >= 20 && !INTEL_DEBUG(DEBUG_NO_CCS)) {
         device->memory.types[device->memory.type_count++] = (struct anv_memory_type) {
            .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
            .heapIndex = 0,
            .compressed = true,
         };
      }
      device->memory.types[device->memory.type_count++] = (struct anv_memory_type) {
         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
         .heapIndex = 0,
      };
      device->memory.types[device->memory.type_count++] = (struct anv_memory_type) {
         .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                          VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                          VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
         .heapIndex = 1,
      };
      device->memory.types[device->memory.type_count++] = (struct anv_memory_type) {
         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                          VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
         /* This memory type comes from heaps[0] if there is only a mappable
          * vram region, or from heaps[2] if there are both mappable &
          * non-mappable vram regions.
          */
         .heapIndex = device->vram_non_mappable.size > 0 ? 2 : 0,
      };
   } else if (device->info.has_llc) {
      /* Big core GPUs share LLC with the CPU and thus one memory type can be
       * both cached and coherent at the same time.
       *
       * But some game engines can't handle a single memory type well:
       * https://gitlab.freedesktop.org/mesa/mesa/-/issues/7360#note_1719438
       *
       * TODO: with the current Xe uAPI we can't change the mmap mode, so
       * only two memory types are supported here.
       */
      device->memory.type_count = 2;
      device->memory.types[0] = (struct anv_memory_type) {
         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
         .heapIndex = 0,
      };
      device->memory.types[1] = (struct anv_memory_type) {
         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                          VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                          VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
         .heapIndex = 0,
      };
   } else {
      if (device->info.ver >= 20 && !INTEL_DEBUG(DEBUG_NO_CCS)) {
         device->memory.types[device->memory.type_count++] = (struct anv_memory_type) {
            .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
            .heapIndex = 0,
            .compressed = true,
         };
      }
      device->memory.types[device->memory.type_count++] = (struct anv_memory_type) {
         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                          VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
         .heapIndex = 0,
      };
      device->memory.types[device->memory.type_count++] = (struct anv_memory_type) {
         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                          VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                          VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
         .heapIndex = 0,
      };
   }
   return VK_SUCCESS;
}

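/* Check whether the given exec queue has been banned by the kernel; if so
 * (or if the query itself fails), mark the logical device as lost.
 */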
static VkResult
anv_xe_get_device_status(struct anv_device *device, uint32_t exec_queue_id)
{
   VkResult result = VK_SUCCESS;
   struct drm_xe_exec_queue_get_property exec_queue_get_property = {
      .exec_queue_id = exec_queue_id,
      .property = DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN,
   };
   int ret = intel_ioctl(device->fd, DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY,
                         &exec_queue_get_property);

   if (ret || exec_queue_get_property.value)
      result = vk_device_set_lost(&device->vk, "One or more queues banned");

   return result;
}

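/* Poll the ban status of every queue owned by the device (and of its
 * companion RCS exec queue, if any) and report device loss.
 */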
VkResult
anv_xe_device_check_status(struct vk_device *vk_device)
{
   struct anv_device *device = container_of(vk_device, struct anv_device, vk);
   VkResult result = VK_SUCCESS;

   for (uint32_t i = 0; i < device->queue_count; i++) {
      result = anv_xe_get_device_status(device, device->queues[i].exec_queue_id);
      if (result != VK_SUCCESS)
         return result;

      if (device->queues[i].companion_rcs_id != 0) {
         uint32_t exec_queue_id = device->queues[i].companion_rcs_id;
         result = anv_xe_get_device_status(device, exec_queue_id);
         if (result != VK_SUCCESS)
            return result;
      }
   }

   return result;
}