/*
 * Copyright © 2023 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"

#include "util/u_math.h"

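/* Record a VA range and return the first address after it, so that
 * successive calls lay out back-to-back ranges.
 */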
static uint64_t
va_add(struct anv_va_range *range, uint64_t addr, uint64_t size)
{
   range->addr = addr;
   range->size = size;

   return addr + size;
}

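/* Record a VA range at a fixed address without advancing past it; used for
 * ranges that deliberately overlap another pool.
 */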
static void
va_at(struct anv_va_range *range, uint64_t addr, uint64_t size)
{
   range->addr = addr;
   range->size = size;
}

static void
anv_device_print_vas(struct anv_physical_device *device)
{
   fprintf(stderr, "Driver heaps:\n");
#define PRINT_HEAP(name) \
   fprintf(stderr, " 0x%016"PRIx64"-0x%016"PRIx64": %s\n", \
           device->va.name.addr, \
           device->va.name.addr + device->va.name.size, \
           #name);
   PRINT_HEAP(general_state_pool);
   PRINT_HEAP(low_heap);
   PRINT_HEAP(binding_table_pool);
   PRINT_HEAP(internal_surface_state_pool);
   PRINT_HEAP(scratch_surface_state_pool);
   PRINT_HEAP(bindless_surface_state_pool);
   PRINT_HEAP(indirect_descriptor_pool);
   PRINT_HEAP(indirect_push_descriptor_pool);
   PRINT_HEAP(instruction_state_pool);
   PRINT_HEAP(dynamic_state_pool);
   PRINT_HEAP(dynamic_visible_pool);
   PRINT_HEAP(push_descriptor_buffer_pool);
   PRINT_HEAP(high_heap);
   PRINT_HEAP(trtt);
}
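
/* Example output with heap debugging enabled (INTEL_DEBUG(DEBUG_HEAPS));
 * the first two lines follow from the layout built below, the rest are
 * elided:
 *
 *    Driver heaps:
 *     0x0000000000200000-0x0000000040000000: general_state_pool
 *     0x0000000040000000-0x0000000080000000: low_heap
 *     ...
 */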

void
anv_physical_device_init_va_ranges(struct anv_physical_device *device)
{
   /* anv Virtual Memory Layout
    * =========================
    *
    * When the anv driver is determining the virtual graphics addresses of
    * memory objects itself using the softpin mechanism, the following memory
    * ranges will be used.
    *
    * Three special considerations to notice:
    *
    * (1) the dynamic state pool is located within the same 4 GiB as the low
    * heap. This is to work around a VF cache issue described in a comment in
    * anv_physical_device_init_heaps.
    *
    * (2) the binding table pool is located at lower addresses than the BT
    * (binding table) surface state pool, within a 4 GiB range which also
    * contains the bindless surface state pool. This allows surface state
    * base addresses to cover binding tables (16-bit offsets), internal
    * surface states (32-bit offsets) and bindless surface states.
    *
    * (3) the last 4 GiB of the address space is withheld from the high
    * heap. Various hardware units will read past the end of an object for
    * various reasons. This healthy margin prevents reads from wrapping
    * around 48-bit addresses.
    */
   uint64_t _1Mb = 1ull * 1024 * 1024;
   uint64_t _1Gb = 1ull * 1024 * 1024 * 1024;
   uint64_t _4Gb = 4ull * 1024 * 1024 * 1024;

   uint64_t address = 0x000000200000ULL; /* 2MiB */

   address = va_add(&device->va.general_state_pool, address,
                    _1Gb - address);

   address = va_add(&device->va.low_heap, address, _1Gb);

   /* The binding table pool has to be located directly in front of the
    * surface states.
    */
   address += _1Gb;
   address = va_add(&device->va.binding_table_pool, address, _1Gb);
   address = va_add(&device->va.internal_surface_state_pool, address, 1 * _1Gb);
   assert(device->va.internal_surface_state_pool.addr ==
          align64(device->va.internal_surface_state_pool.addr, 2 * _1Gb));
   /* Scratch surface state overlaps with the internal surface state */
   va_at(&device->va.scratch_surface_state_pool,
         device->va.internal_surface_state_pool.addr,
         8 * _1Mb);
   address = va_add(&device->va.bindless_surface_state_pool, address, 2 * _1Gb);
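
   /* To recap consideration (2) above (derived from the additions just
    * made), with base = device->va.binding_table_pool.addr:
    *
    *    binding_table_pool:          [base,           base + 1 GiB)
    *    internal_surface_state_pool: [base + 1 GiB,   base + 2 GiB)
    *    bindless_surface_state_pool: [base + 2 GiB,   base + 4 GiB)
    *
    * so a surface state base address programmed at `base` reaches all three
    * pools with offsets below 4 GiB.
    */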

   if (device->indirect_descriptors) {
      /* With indirect descriptors, descriptor buffers can go anywhere, they
       * just need to be in a 4 GiB aligned range, so all shader accesses
       * can use a relocatable upper dword for the 64-bit address.
       */
      address = align64(address, _4Gb);
      address = va_add(&device->va.indirect_descriptor_pool, address, 3 * _1Gb);
      address = va_add(&device->va.indirect_push_descriptor_pool, address, _1Gb);
   }
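
   /* A sketch of the shader-side addressing this enables (hypothetical
    * pseudocode, not code from this driver): because the two descriptor
    * pools above share one 4 GiB aligned window, a full 64-bit address can
    * be rebuilt from a relocatable upper dword and a 32-bit offset, without
    * a 64-bit add:
    *
    *    uint64_t addr = ((uint64_t)desc_pool_addr_hi << 32) | offset_32;
    */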

   /* We use a trick to compute constant data offsets in the shaders to avoid
    * unnecessary 64-bit address computations (see lower_load_constant() in
    * anv_nir_apply_pipeline_layout.c). This assumes the instruction pool is
    * located at an address with the lower 32 bits at 0.
    */
   address = align64(address, _4Gb);
   address = va_add(&device->va.instruction_state_pool, address, 2 * _1Gb);
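
   /* A sketch of that trick (hypothetical pseudocode; lower_load_constant()
    * has the real version): with the low 32 bits of the pool base known to
    * be zero, a constant data address is just the base OR-ed with a 32-bit
    * offset, no 64-bit add required:
    *
    *    uint64_t addr = instruction_state_pool_base | (uint64_t)offset_32;
    */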

   address += 1 * _1Gb;
   address = va_add(&device->va.dynamic_state_pool, address, _1Gb);
   address = va_add(&device->va.dynamic_visible_pool, address,
                    device->info.verx10 >= 125 ? (2 * _1Gb) : (3 * _1Gb - 4096));
   assert(device->va.dynamic_visible_pool.addr % _4Gb == 0);
   if (device->info.verx10 >= 125)
      address = va_add(&device->va.push_descriptor_buffer_pool, address, _1Gb - 4096);

   address = align64(address, device->info.mem_alignment);
   address = va_add(&device->va.aux_tt_pool, address, 2 * _1Gb);

   /* What's left to do for us is to set va.high_heap and va.trtt without
    * overlap, but there are a few things to be considered:
    *
    * The TR-TT address space is governed by the GFX_TRTT_VA_RANGE register,
    * which carves out part of the address space for TR-TT and is independent
    * of device->gtt_size. We use bits 47:44 on gen9+; the values we set here
    * should be in sync with what we write to the register.
    *
    * If we ever gain the capability to use more than 48 bits of address
    * space we'll have to adjust where we put the TR-TT space (and how we
    * set GFX_TRTT_VA_RANGE).
    *
    * We have to leave the last 4 GiB out of the high vma range, so that no
    * state base address + size can overflow 48 bits. For more information
    * see the comment about Wa32bitGeneralStateOffset in anv_allocator.c.
    *
    * Despite the comment above, before we had TR-TT we were not only
    * avoiding the last 4 GiB of the 48-bit address space, but also avoiding
    * the last 4 GiB below gtt_size, so let's stay on the safe side and keep
    * the 4 GiB margin at both the top of the TR-TT space and the GTT top.
    */
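   /* Worked out: trtt_start = 0xF << 44 = 0xF00000000000 (240 TiB) and
    * trtt_end = 2^48 - 4 GiB = 0xFFFF00000000, so TR-TT gets the top
    * sixteenth of the 48-bit address space, minus the 4 GiB guard at its
    * top.
    */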
   assert(device->gtt_size <= (1uLL << 48));
   uint64_t trtt_start = 0xFuLL << 44;
   uint64_t trtt_end = (1uLL << 48) - 4 * _1Gb;
   uint64_t addressable_top = MIN2(device->gtt_size, trtt_start) - 4 * _1Gb;

   uint64_t user_heaps_size = addressable_top - address;
   address = va_add(&device->va.high_heap, address, user_heaps_size);
   assert(address <= trtt_start);
   address = va_add(&device->va.trtt, trtt_start, trtt_end - trtt_start);

   if (INTEL_DEBUG(DEBUG_HEAPS))
      anv_device_print_vas(device);
}