/*
 * Copyright © 2022 Collabora Ltd. and Red Hat Inc.
 * SPDX-License-Identifier: MIT
 */
#include "nvk_mme.h"

#include "nvk_private.h"

#include "mme_sim.h"

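/* One builder function per macro.  nvk_build_mme() dispatches through this
 * table to emit the MME bytecode for a given NVK_MME_* entry point.
 */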
static const nvk_mme_builder_func mme_builders[NVK_MME_COUNT] = {
   [NVK_MME_SELECT_CB0] = nvk_mme_select_cb0,
   [NVK_MME_BIND_CBUF_DESC] = nvk_mme_bind_cbuf_desc,
   [NVK_MME_CLEAR] = nvk_mme_clear,
   [NVK_MME_BIND_IB] = nvk_mme_bind_ib,
   [NVK_MME_BIND_VB] = nvk_mme_bind_vb,
   [NVK_MME_SET_VB_ENABLES] = nvk_mme_set_vb_enables,
   [NVK_MME_SET_VB_STRIDE] = nvk_mme_set_vb_stride,
   [NVK_MME_SET_TESS_PARAMS] = nvk_mme_set_tess_params,
   [NVK_MME_SET_ANTI_ALIAS] = nvk_mme_set_anti_alias,
   [NVK_MME_DRAW] = nvk_mme_draw,
   [NVK_MME_DRAW_INDEXED] = nvk_mme_draw_indexed,
   [NVK_MME_DRAW_INDIRECT] = nvk_mme_draw_indirect,
   [NVK_MME_DRAW_INDEXED_INDIRECT] = nvk_mme_draw_indexed_indirect,
   [NVK_MME_DRAW_INDIRECT_COUNT] = nvk_mme_draw_indirect_count,
   [NVK_MME_DRAW_INDEXED_INDIRECT_COUNT] = nvk_mme_draw_indexed_indirect_count,
   [NVK_MME_ADD_CS_INVOCATIONS] = nvk_mme_add_cs_invocations,
   [NVK_MME_DISPATCH_INDIRECT] = nvk_mme_dispatch_indirect,
   [NVK_MME_WRITE_CS_INVOCATIONS] = nvk_mme_write_cs_invocations,
   [NVK_MME_XFB_COUNTER_LOAD] = nvk_mme_xfb_counter_load,
   [NVK_MME_XFB_DRAW_INDIRECT] = nvk_mme_xfb_draw_indirect,
   [NVK_MME_SET_PRIV_REG] = nvk_mme_set_priv_reg,
   [NVK_MME_SET_WRITE_MASK] = nvk_mme_set_write_mask,
   [NVK_MME_SET_CONSERVATIVE_RASTER_STATE] = nvk_mme_set_conservative_raster_state,
   [NVK_MME_SET_VIEWPORT_MIN_MAX_Z] = nvk_mme_set_viewport_min_max_z,
   [NVK_MME_SET_Z_CLAMP] = nvk_mme_set_z_clamp,
};

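/* Optional test vectors, run by nvk_test_all_mmes().  Each entry is an array
 * of nvk_mme_test_case terminated by a case with params == NULL.  Using only
 * the fields exercised below (values and method address are hypothetical;
 * exact field types live in nvk_mme.h), a table might look like:
 *
 *    const struct nvk_mme_test_case nvk_mme_foo_tests[] = {{
 *       .params = (const uint32_t[]) { 0x4 },
 *       .expected = (const struct nvk_mme_mthd_data[]) {
 *          { .mthd = 0x1234, .data = 0x4 },
 *          { }
 *       },
 *    }, { }};
 */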
static const struct nvk_mme_test_case *mme_tests[NVK_MME_COUNT] = {
   [NVK_MME_CLEAR] = nvk_mme_clear_tests,
   [NVK_MME_BIND_VB] = nvk_mme_bind_vb_tests,
   [NVK_MME_SET_TESS_PARAMS] = nvk_mme_set_tess_params_tests,
   [NVK_MME_SET_ANTI_ALIAS] = nvk_mme_set_anti_alias_tests,
};

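/* Builds the MME bytecode for one macro.  The returned buffer is
 * heap-allocated and owned by the caller, who must free() it; *size_out is
 * set by mme_builder_finish().  A minimal usage sketch, assuming `info` is a
 * filled-in nv_device_info:
 *
 *    size_t size;
 *    uint32_t *dw = nvk_build_mme(&info, NVK_MME_CLEAR, &size);
 *    ... hand the macro off to the hardware or the simulator ...
 *    free(dw);
 */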
uint32_t *
nvk_build_mme(const struct nv_device_info *devinfo,
              enum nvk_mme mme, size_t *size_out)
{
   struct mme_builder b;
   mme_builder_init(&b, devinfo);

   mme_builders[mme](&b);

   return mme_builder_finish(&b, size_out);
}

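/* Per-run state for simulating one test case.  `pi` indexes the next unread
 * entry of test->params and `ei` counts the methods emitted so far, indexing
 * both results[] and test->expected[].
 */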
struct nvk_mme_test_state {
   const struct nvk_mme_test_case *test;
   struct nvk_mme_mthd_data results[32];
   uint32_t pi, ei;
};

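/* mme_sim "load" callback: feeds the macro its next parameter from the
 * test's params[] array.
 */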
static uint32_t
nvk_mme_test_state_load(void *_ts)
{
   struct nvk_mme_test_state *ts = _ts;
   return ts->test->params[ts->pi++];
}

static uint32_t
nvk_mme_test_state_state(void *_ts, uint16_t addr)
{
   struct nvk_mme_test_state *ts = _ts;

   /* First, look backwards through the expected data that we've already
    * written. This ensures that mthd() impacts state().
    */
   for (int32_t i = ts->ei - 1; i >= 0; i--) {
      if (ts->test->expected[i].mthd == addr)
         return ts->test->expected[i].data;
   }

   /* Now look at init. We assume the init data is unique */
   assert(ts->test->init != NULL && "Read uninitialized state");
   for (uint32_t i = 0;; i++) {
      if (ts->test->init[i].mthd == 0)
         unreachable("Read uninitialized state");

      if (ts->test->init[i].mthd == addr)
         return ts->test->init[i].data;
   }
}

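/* mme_sim "mthd" callback: records every method the macro fires and, when
 * the test provides an expected[] list, checks it in order.
 */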
static void
nvk_mme_test_state_mthd(void *_ts, uint16_t addr, uint32_t data)
{
   struct nvk_mme_test_state *ts = _ts;

   assert(ts->ei < ARRAY_SIZE(ts->results));
   ts->results[ts->ei] = (struct nvk_mme_mthd_data) {
      .mthd = addr,
      .data = data,
   };

   if (ts->test->expected != NULL) {
      assert(ts->test->expected[ts->ei].mthd != 0);
      assert(ts->test->expected[ts->ei].mthd == addr);
      assert(ts->test->expected[ts->ei].data == data);
   }

   ts->ei++;
}

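/* Callback table handed to mme_sim_core() below: load() supplies macro
 * parameters, state() answers method-state reads, and mthd() captures
 * method writes.
 */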
const struct mme_sim_state_ops nvk_mme_test_state_ops = {
   .load = nvk_mme_test_state_load,
   .state = nvk_mme_test_state_state,
   .mthd = nvk_mme_test_state_mthd,
};

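/* Builds every macro and runs any associated test cases through the MME
 * simulator, asserting that the emitted methods match each test's
 * expectations.
 */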
void
nvk_test_all_mmes(const struct nv_device_info *devinfo)
{
   for (uint32_t mme = 0; mme < NVK_MME_COUNT; mme++) {
      size_t size;
      uint32_t *dw = nvk_build_mme(devinfo, mme, &size);
      assert(dw != NULL);

      if (mme_tests[mme] != NULL) {
         for (uint32_t i = 0;; i++) {
            if (mme_tests[mme][i].params == NULL)
               break;

            struct nvk_mme_test_state ts = {
               .test = &mme_tests[mme][i],
            };
            mme_sim_core(devinfo, size, dw, &nvk_mme_test_state_ops, &ts);
            if (ts.test->expected != NULL)
               assert(ts.test->expected[ts.ei].mthd == 0);
            if (ts.test->check != NULL)
               ts.test->check(devinfo, ts.test, ts.results);
         }
      }

      free(dw);
   }
}