/*
 * Copyright 2010 Red Hat Inc.
 * Authors: Dave Airlie
 * SPDX-License-Identifier: MIT
 */

#include <stdio.h>

#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "util/u_math.h"

#include "r300_screen_buffer.h"

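/* Copy user index data for SWTCL draws into the upload buffer; *index_buffer
 * receives the upload resource and *start is rewritten relative to it. */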
void r300_upload_index_buffer(struct r300_context *r300,
                              struct pipe_resource **index_buffer,
                              unsigned index_size, unsigned *start,
                              unsigned count, const uint8_t *ptr)
{
    unsigned index_offset;

    *index_buffer = NULL;

    u_upload_data(r300->uploader,
                  0, count * index_size, 4,
                  ptr + (*start * index_size),
                  &index_offset,
                  index_buffer);

    *start = index_offset / index_size;
}

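/* Free the storage behind a buffer or texture resource: the malloced copy
 * and/or the winsys buffer object backing it. */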
void r300_resource_destroy(struct pipe_screen *screen,
                           struct pipe_resource *buf)
{
    struct r300_screen *rscreen = r300_screen(screen);

    if (buf->target == PIPE_BUFFER) {
        struct r300_resource *rbuf = r300_resource(buf);

        align_free(rbuf->malloced_buffer);

        if (rbuf->buf)
            radeon_bo_reference(rscreen->rws, &rbuf->buf, NULL);

        FREE(rbuf);
    } else {
        struct r300_resource *tex = (struct r300_resource*)buf;

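        /* If this texture is registered as the screen's CMASK resource,
         * unregister it before it goes away. */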
        if (tex->tex.cmask_dwords) {
            mtx_lock(&rscreen->cmask_mutex);
            if (buf == rscreen->cmask_resource) {
                rscreen->cmask_resource = NULL;
            }
            mtx_unlock(&rscreen->cmask_mutex);
        }
        radeon_bo_reference(rscreen->rws, &tex->buf, NULL);
        FREE(tex);
    }
}

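/* Map a buffer for CPU access.  Malloced buffers are returned directly;
 * winsys buffers go through the radeon winsys, with the storage reallocated
 * on a whole-resource discard to avoid stalling on the GPU. */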
void *
r300_buffer_transfer_map(struct pipe_context *context,
                         struct pipe_resource *resource,
                         unsigned level,
                         unsigned usage,
                         const struct pipe_box *box,
                         struct pipe_transfer **ptransfer)
{
    struct r300_context *r300 = r300_context(context);
    struct radeon_winsys *rws = r300->screen->rws;
    struct r300_resource *rbuf = r300_resource(resource);
    struct pipe_transfer *transfer;
    uint8_t *map;

    transfer = slab_alloc(&r300->pool_transfers);
    transfer->resource = resource;
    transfer->level = level;
    transfer->usage = usage;
    transfer->box = *box;
    transfer->stride = 0;
    transfer->layer_stride = 0;

    if (rbuf->malloced_buffer) {
        *ptransfer = transfer;
        return rbuf->malloced_buffer + box->x;
    }

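    /* On a whole-resource discard without explicit unsynchronized access,
     * replace the storage with a fresh buffer if the current one is busy,
     * so the CPU does not have to wait for the GPU. */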
    if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE &&
        !(usage & PIPE_MAP_UNSYNCHRONIZED)) {
        assert(usage & PIPE_MAP_WRITE);

        /* Check if mapping this buffer would cause waiting for the GPU. */
        if (r300->rws->cs_is_buffer_referenced(&r300->cs, rbuf->buf, RADEON_USAGE_READWRITE) ||
            !r300->rws->buffer_wait(r300->rws, rbuf->buf, 0, RADEON_USAGE_READWRITE)) {
            unsigned i;
            struct pb_buffer_lean *new_buf;

            /* Create a new one in the same pipe_resource. */
            new_buf = r300->rws->buffer_create(r300->rws, rbuf->b.width0,
                                               R300_BUFFER_ALIGNMENT,
                                               rbuf->domain,
                                               RADEON_FLAG_NO_INTERPROCESS_SHARING);
            if (new_buf) {
                /* Discard the old buffer. */
                radeon_bo_reference(r300->rws, &rbuf->buf, NULL);
                rbuf->buf = new_buf;

                /* We changed the buffer, now we need to bind it where the old one was bound. */
                for (i = 0; i < r300->nr_vertex_buffers; i++) {
                    if (r300->vertex_buffer[i].buffer.resource == &rbuf->b) {
                        r300->vertex_arrays_dirty = true;
                        break;
                    }
                }
            }
        }
    }

    /* Buffers are never used for write, therefore mapping for read can be
     * unsynchronized. */
    if (!(usage & PIPE_MAP_WRITE)) {
        usage |= PIPE_MAP_UNSYNCHRONIZED;
    }

    map = rws->buffer_map(rws, rbuf->buf, &r300->cs, usage);

    if (!map) {
        slab_free(&r300->pool_transfers, transfer);
        return NULL;
    }

    *ptransfer = transfer;
    return map + box->x;
}

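/* Nothing needs to be unmapped explicitly; only the transfer object allocated
 * in r300_buffer_transfer_map is released. */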
void r300_buffer_transfer_unmap(struct pipe_context *pipe,
                                struct pipe_transfer *transfer)
{
    struct r300_context *r300 = r300_context(pipe);

    slab_free(&r300->pool_transfers, transfer);
}

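/* Create a buffer resource.  Constant buffers and SWTCL buffers are backed
 * by malloced memory; everything else gets a winsys buffer object. */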
struct pipe_resource *r300_buffer_create(struct pipe_screen *screen,
                                         const struct pipe_resource *templ)
{
    struct r300_screen *r300screen = r300_screen(screen);
    struct r300_resource *rbuf;

    rbuf = MALLOC_STRUCT(r300_resource);

    rbuf->b = *templ;
    pipe_reference_init(&rbuf->b.reference, 1);
    rbuf->b.screen = screen;
    rbuf->domain = RADEON_DOMAIN_GTT;
    rbuf->buf = NULL;
    rbuf->malloced_buffer = NULL;

    /* Allocate constant buffers and SWTCL vertex and index buffers in RAM.
     * Note that uploaded index buffers use the flag PIPE_BIND_CUSTOM, so that
     * we can distinguish them from user-created buffers.
     */
    if (templ->bind & PIPE_BIND_CONSTANT_BUFFER ||
        (!r300screen->caps.has_tcl && !(templ->bind & PIPE_BIND_CUSTOM))) {
        rbuf->malloced_buffer = align_malloc(templ->width0, 64);
        return &rbuf->b;
    }

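    /* All other buffers are backed by a winsys buffer object in the GTT
     * domain chosen above. */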
    rbuf->buf =
        r300screen->rws->buffer_create(r300screen->rws, rbuf->b.width0,
                                       R300_BUFFER_ALIGNMENT,
                                       rbuf->domain,
                                       RADEON_FLAG_NO_INTERPROCESS_SHARING);
    if (!rbuf->buf) {
        FREE(rbuf);
        return NULL;
    }
    return &rbuf->b;
}