/*
 * Copyright © 2023 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

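/*
 * Stress test for anv_state_pool's max_size limit: NUM_THREADS threads race
 * to allocate STATES_PER_THREAD states each from a shared pool, and the main
 * thread then verifies that the number of failed allocations matches what the
 * pool's configured max_size allows (none in the within-limit variant, a
 * predictable count in the over-limit variant).
 */
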
#include <pthread.h>

#include "anv_private.h"
#include "test_common.h"

#define NUM_THREADS 8
#define STATES_PER_THREAD 1024
#define NUM_RUNS 1

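/* Per-thread bookkeeping: each worker records the states it allocates so the
 * main thread can validate their offsets and count failed allocations once
 * all threads have joined.
 */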
static struct job {
   pthread_t thread;
   uint32_t state_size;
   uint32_t state_alignment;
   struct anv_state_pool *pool;
   struct anv_state states[STATES_PER_THREAD];
} jobs[NUM_THREADS];

static pthread_barrier_t barrier;

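/* Worker thread: wait on the barrier so that all threads start allocating at
 * the same time, then pull STATES_PER_THREAD states from the shared pool. A
 * failed allocation comes back with alloc_size == 0 and is counted during
 * validation.
 */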
static void *alloc_states(void *_job)
{
   struct job *job = _job;

   pthread_barrier_wait(&barrier);

   for (unsigned i = 0; i < STATES_PER_THREAD; i++) {
      struct anv_state state = anv_state_pool_alloc(job->pool,
                                                    job->state_size,
                                                    job->state_alignment);
      job->states[i] = state;
   }

   return NULL;
}

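/* Build a stub device with a state pool capped at pool_max_size, run the
 * allocation threads, then check that offsets within each thread are strictly
 * increasing and that the number of failed allocations matches the pool's
 * capacity.
 */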
static void run_test(uint32_t state_size,
                     uint32_t state_alignment,
                     uint32_t block_size,
                     uint32_t pool_max_size)
{
   struct anv_physical_device physical_device = { };
   struct anv_device device = {};
   struct anv_state_pool state_pool;

   test_device_info_init(&physical_device.info);
   anv_device_set_physical(&device, &physical_device);
   device.kmd_backend = anv_kmd_backend_get(INTEL_KMD_TYPE_STUB);
   pthread_mutex_init(&device.mutex, NULL);
   anv_bo_cache_init(&device.bo_cache, &device);
   anv_state_pool_init(&state_pool, &device,
                       &(struct anv_state_pool_params) {
                          .name         = "test",
                          .base_address = 4096,
                          .start_offset = 0,
                          .block_size   = block_size,
                          .max_size     = pool_max_size,
                       });

   pthread_barrier_init(&barrier, NULL, NUM_THREADS);

   for (unsigned i = 0; i < ARRAY_SIZE(jobs); i++) {
      jobs[i].state_size = state_size;
      jobs[i].state_alignment = state_alignment;
      jobs[i].pool = &state_pool;
      pthread_create(&jobs[i].thread, NULL, alloc_states, &jobs[i]);
   }

   for (unsigned i = 0; i < ARRAY_SIZE(jobs); i++)
      pthread_join(jobs[i].thread, NULL);

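   /* The expected-failure count assumes each allocation consumes one
    * block_size-sized chunk of the pool: at most pool_max_size / block_size
    * allocations can succeed, and every request beyond that should have come
    * back with alloc_size == 0.
    */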
   const uint32_t expected_allocation_fails =
      (NUM_THREADS * STATES_PER_THREAD * block_size) > pool_max_size ?
      ((NUM_THREADS * STATES_PER_THREAD) - (pool_max_size / block_size)) : 0;
   uint32_t allocation_fails = 0;
   for (unsigned j = 0; j < ARRAY_SIZE(jobs); j++) {
      int64_t last_state_offset = -1;
      for (unsigned s = 0; s < ARRAY_SIZE(jobs[j].states); s++) {
         if (jobs[j].states[s].alloc_size) {
            ASSERT(last_state_offset < jobs[j].states[s].offset);
            last_state_offset = jobs[j].states[s].offset;
         } else {
            allocation_fails++;
         }
      }
   }

   ASSERT(allocation_fails == expected_allocation_fails);

   anv_state_pool_finish(&state_pool);
   anv_bo_cache_finish(&device.bo_cache);
   pthread_mutex_destroy(&device.mutex);
}

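/* Within-limit variant: the pool's max_size covers the full demand (block_size
 * of 64 times NUM_THREADS * STATES_PER_THREAD allocations), so no allocation
 * should fail. The over-limit variant below caps the pool at a quarter of
 * that, so three quarters of the allocations are expected to fail.
 */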
void state_pool_max_size_within_limit(void);

void state_pool_max_size_within_limit(void)
{
   for (unsigned i = 0; i < NUM_RUNS; i++)
      run_test(16, 16, 64, 64 * NUM_THREADS * STATES_PER_THREAD);
}

void state_pool_max_size_over_limit(void);

void state_pool_max_size_over_limit(void)
{
   for (unsigned i = 0; i < NUM_RUNS; i++)
      run_test(16, 16, 64, 16 * NUM_THREADS * STATES_PER_THREAD);
}