/*
 * Copyright © 2018 Intel Corporation
 * Copyright © 2018 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "v3d_compiler.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_format_convert.h"

/** @file v3d_nir_lower_scratch.c
 *
 * Swizzles the addresses of nir_intrinsic_load_scratch and
 * nir_intrinsic_store_scratch so that, for each dword of scratch accessed,
 * the channels of a QPU read or write one contiguous cacheline.
 */

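/* Illustrative sketch of the resulting layout (hypothetical helper, not
 * used by the pass): assuming the spill_offset register is set up as
 * spill_base | (eidx << 2) for channel index eidx, and that the lowered
 * byte offset computed below is offset * V3D_CHANNELS, the effective
 * address of a channel's scratch dword works out to roughly:
 */
static inline unsigned
v3d_scratch_addr_sketch(unsigned spill_base, unsigned offset, unsigned eidx)
{
        /* Each dword index (offset / 4) maps to its own block of
         * V3D_CHANNELS * 4 bytes, with the channels' copies of that dword
         * laid out contiguously inside it.
         */
        return spill_base + offset * V3D_CHANNELS + (eidx << 2);
}
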
static nir_def *
v3d_nir_scratch_offset(nir_builder *b, nir_intrinsic_instr *instr)
{
        b->cursor = nir_before_instr(&instr->instr);
        nir_def *offset = nir_get_io_offset_src(instr)->ssa;

        assert(nir_intrinsic_align_mul(instr) >= 4);
        assert(nir_intrinsic_align_offset(instr) % 4 == 0);

        /* The spill_offset register will already have the channel index
         * (EIDX) shifted and ORed in at bit 2, so all we need to do is to
         * move the dword index up above V3D_CHANNELS (see the worked
         * example after this function).
         */
        return nir_imul_imm(b, offset, V3D_CHANNELS);
}
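
/* Worked example (assuming V3D_CHANNELS is 16 and a 64-byte cacheline):
 * an access to the dword at byte offset 8 (dword index 2) by channel 5 has
 * its offset rewritten to 8 * 16 = 128, and the spill_offset register
 * contributes 5 << 2 = 20, so the access lands at byte 148 from the spill
 * base: dword 5 of the third cacheline, right next to the other channels'
 * copies of the same scratch dword.
 */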

static void
v3d_nir_lower_scratch_instr(nir_builder *b, nir_intrinsic_instr *instr)
{
        /* Scalarized through nir_lower_mem_access_bit_sizes. */
        assert(instr->num_components == 1);

        nir_def *offset = v3d_nir_scratch_offset(b, instr);
        nir_src_rewrite(nir_get_io_offset_src(instr), offset);
}

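/* Hedged usage sketch (the surrounding pass order is an assumption, not
 * taken from this file, and mem_access_options is a hypothetical name):
 * scratch access must already be scalarized when this pass runs, so a
 * caller would order things roughly like:
 *
 *         NIR_PASS(_, s, nir_lower_mem_access_bit_sizes, &mem_access_options);
 *         NIR_PASS(_, s, v3d_nir_lower_scratch);
 */
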
static bool
v3d_nir_lower_scratch_cb(nir_builder *b,
                         nir_intrinsic_instr *intr,
                         void *_state)
{
        switch (intr->intrinsic) {
        case nir_intrinsic_load_scratch:
        case nir_intrinsic_store_scratch:
                v3d_nir_lower_scratch_instr(b, intr);
                return true;
        default:
                return false;
        }
}

bool
v3d_nir_lower_scratch(nir_shader *s)
{
        return nir_shader_intrinsics_pass(s, v3d_nir_lower_scratch_cb,
                                          nir_metadata_control_flow, NULL);
}