/*
 * Copyright © 2018 Rob Clark <[email protected]>
 * SPDX-License-Identifier: MIT
 *
 * Authors:
 *    Rob Clark <[email protected]>
 */

#ifndef FREEDRENO_RINGBUFFER_SP_H_
#define FREEDRENO_RINGBUFFER_SP_H_

#include <assert.h>
#include <inttypes.h>
#include <pthread.h>

#include "util/hash_table.h"
#include "util/os_file.h"
#include "util/slab.h"

#include "freedreno_priv.h"
#include "freedreno_ringbuffer.h"

/* A "softpin" implementation of submit/ringbuffer, which lowers CPU overhead
 * by avoiding the additional tracking necessary to build cmds/relocs tables
 * (but still builds a bos table)
 */

/* Backend callback used to flush a list of (possibly merged) submits.
 * NOTE(review): error-return convention (0 on success vs negative errno)
 * is not visible from this header -- confirm against backend impls.
 */
typedef int (*flush_submit_list_fn)(struct list_head *submit_list);

/* Softpin specialization of fd_submit, cast-able via fd_submit_sp(). */
struct fd_submit_sp {
   struct fd_submit base;

   /* Table of all BOs referenced by this submit (what the kernel sees): */
   DECLARE_ARRAY(struct fd_bo *, bos);

   /* Keep a separate table of sub-alloc BOs.. the backing objects are
    * tracked in the main bos table (because this is what the kernel
    * sees), but we need to attach userspace fences to the sub-alloc'd
    * BOs so the driver knows when they are idle
    */
   DECLARE_ARRAY(struct fd_bo *, suballoc_bos);

   /* maps fd_bo to idx in bos table: */
   struct hash_table *bo_table;

   /* maps fd_bo to idx in suballoc_bos table: */
   struct hash_table *suballoc_bo_table;

   /* Slab child pool used to allocate ringbuffer objects for this submit: */
   struct slab_child_pool ring_pool;

   /* Allow for sub-allocation of stateobj ring buffers (ie. sharing
    * the same underlying bo)..
    *
    * We also rely on previous stateobj having been fully constructed
    * so we can reclaim extra space at its end.
    */
   struct fd_ringbuffer *suballoc_ring;

   /* Flush args, potentially attached to the last submit in the list
    * of submits to merge:
    */
   int in_fence_fd;
   struct fd_fence *out_fence;

   /* State for enqueued submits:
    */
   struct list_head submit_list;   /* includes this submit as last element */

   /* Used by retire_queue, if used by backend: */
   struct util_queue_fence retire_fence;

   /* Backend hook invoked to flush submit_list to the kernel: */
   flush_submit_list_fn flush_submit_list;

   /* Sequence number identifying this submit; compared against a stateobj's
    * last_submit_seqno to detect re-emission to the same submit:
    */
   uint32_t seqno;
};
FD_DEFINE_CAST(fd_submit, fd_submit_sp);

/* for FD_RINGBUFFER_GROWABLE rb's, tracks the 'finalized' cmdstream buffers
 * and sizes.  Ie. a finalized buffer can have no more commands appended to
 * it.
 */
struct fd_cmd_sp {
   struct fd_bo *ring_bo;   /* backing cmdstream buffer */
   unsigned size;           /* size in bytes of the finalized contents */
};

/* Softpin specialization of fd_ringbuffer, cast-able via fd_ringbuffer_sp(). */
struct fd_ringbuffer_sp {
   struct fd_ringbuffer base;

   /* for FD_RINGBUFFER_STREAMING rb's which are sub-allocated */
   unsigned offset;

   union {
      /* for _FD_RINGBUFFER_OBJECT case, the array of BOs referenced from
       * this one
       */
      struct {
         struct fd_pipe *pipe;
         DECLARE_ARRAY(struct fd_bo *, reloc_bos);
#ifndef NDEBUG
         /* BOs to assert are attached to submit: */
         DECLARE_ARRAY(struct fd_bo *, assert_bos);
#endif

         /**
          * The seqno of the last submit we were emitted to.  For stateobjs
          * it is common to be re-emitted multiple times to the same submit,
          * we can use this to detect the case.
          */
         uint32_t last_submit_seqno;
      };
      /* for other cases: */
      struct {
         struct fd_submit *submit;
         /* finalized cmdstream buffers for FD_RINGBUFFER_GROWABLE rb's: */
         DECLARE_ARRAY(struct fd_cmd_sp, cmds);
      };
   } u;

   /* current backing buffer for this ringbuffer: */
   struct fd_bo *ring_bo;
};
FD_DEFINE_CAST(fd_ringbuffer, fd_ringbuffer_sp);

/* Flush any deferred submits up to (and including) the specified fence: */
void fd_pipe_sp_flush(struct fd_pipe *pipe, uint32_t fence);

/* Add bo to the submit's bos table (if not already present) and return its
 * index in that table:
 */
uint32_t fd_submit_append_bo(struct fd_submit_sp *submit, struct fd_bo *bo);

/* Create a new softpin submit whose flush is handled by flush_submit_list: */
struct fd_submit *fd_submit_sp_new(struct fd_pipe *pipe,
                                   flush_submit_list_fn flush_submit_list);

/* Per-pipe init/teardown of the ringbuffer suballocation pool: */
void fd_pipe_sp_ringpool_init(struct fd_pipe *pipe);
void fd_pipe_sp_ringpool_fini(struct fd_pipe *pipe);

/* Create a new _FD_RINGBUFFER_OBJECT (stateobj) ringbuffer of at least the
 * specified size:
 */
struct fd_ringbuffer *fd_ringbuffer_sp_new_object(struct fd_pipe *pipe, uint32_t size);

#endif /* FREEDRENO_RINGBUFFER_SP_H_ */