xref: /XiangShan/src/main/scala/xiangshan/backend/rob/Rab.scala (revision ffc4f3c27f2e8b15cf745e53b8937b8fe1259471)
1package xiangshan.backend.rob
2
3import org.chipsalliance.cde.config.Parameters
4import chisel3._
5import chisel3.util._
6import xiangshan._
7import utils._
8import utility._
9import xiangshan.backend.Bundles.DynInst
10import xiangshan.backend.decode.VectorConstants
11import xiangshan.backend.rename.SnapshotGenerator
12
/**
 * Circular-queue pointer into the Rename Buffer (RAB).
 *
 * @param size depth of the RAB this pointer indexes
 */
class RenameBufferPtr(size: Int) extends CircularQueuePtr[RenameBufferPtr](size) {
  // Auxiliary constructor: derive the queue depth from the core configuration.
  def this()(implicit p: Parameters) = this(p(XSCoreParamsKey).RabSize)
}
16
object RenameBufferPtr {
  /**
   * Builds a wire-typed RAB pointer initialized with the given wrap flag and value.
   *
   * @param flag initial wrap (flag) bit of the circular pointer
   * @param v    initial index value
   * @return a [[RenameBufferPtr]] wire driven with the given constants
   */
  def apply(flag: Boolean = false, v: Int = 0)(implicit p: Parameters): RenameBufferPtr = {
    val rabDepth = p(XSCoreParamsKey).RabSize
    val newPtr = Wire(new RenameBufferPtr(rabDepth))
    // The two connections below are independent; order does not matter.
    newPtr.value := v.U
    newPtr.flag := flag.B
    newPtr
  }
}
25
/**
 * One RAB entry: the commit info produced at rename, extended with the
 * ROB index of the instruction it belongs to.
 */
class RenameBufferEntry(implicit p: Parameters) extends RobCommitInfo {
  // ROB entry this (ldest, pdest) pair was renamed for
  val robIdx = new RobPtr
}
29
/**
 * Rename Buffer (RAB): a circular queue of (ldest, pdest) pairs produced at
 * rename. Pairs are committed to the architectural RAT in program order, or
 * walked backwards to restore the speculative RAT after a redirect.
 *
 * Three states:
 *  - s_idle:         normal operation, commit pairs from the dequeue side.
 *  - s_special_walk: after a non-snapshot redirect, pending-commit pairs are
 *                    simultaneously committed (arch RAT) and walked (spec RAT).
 *  - s_walk:         walk pairs from a restored snapshot pointer.
 *
 * @param size depth of the buffer (RabSize at instantiation)
 */
class RenameBuffer(size: Int)(implicit p: Parameters) extends XSModule with HasCircularQueuePtrHelper {
  val io = IO(new Bundle {
    // Redirect/flush request; only `valid` is consumed, the payload is empty.
    val redirect = Input(ValidIO(new Bundle {
    }))

    // Enqueue requests from rename, one per rename slot.
    val req = Vec(RenameWidth, Flipped(ValidIO(new DynInst)))
    // Per-cycle commit/walk bookkeeping published by the ROB.
    val fromRob = new Bundle {
      val walkSize = Input(UInt(log2Up(size).W))
      val walkEnd = Input(Bool())
      val commitSize = Input(UInt(log2Up(size).W))
    }

    // Snapshot control (snapshot enqueue/dequeue/flush and selection).
    val snpt = Input(new SnapshotPort)

    val canEnq = Output(Bool())
    val enqPtrVec = Output(Vec(RenameWidth, new RenameBufferPtr))
    // pdest of the vconfig-writing instruction among the commit candidates.
    val vconfigPdest = Output(UInt(PhyRegIdxWidth.W))
    val commits = Output(new RobCommitIO)
    val diffCommits = Output(new DiffCommitIO)

    val status = Output(new Bundle {
      val walkEnd = Bool()
    })
  })

  // alias
  private val snptSelect = io.snpt.snptSelect

  // Enqueue pointers: binary form (one per rename slot) plus a one-hot shadow
  // used for fast entry selection. The one-hot may shift [0, RenameWidth] steps.
  private val enqPtrVec = RegInit(VecInit.tabulate(RenameWidth)(idx => RenameBufferPtr(flag = false, idx)))
  private val enqPtr = enqPtrVec.head
  private val enqPtrOH = RegInit(1.U(size.W))
  private val enqPtrOHShift = CircularShift(enqPtrOH)
  // may shift [0, RenameWidth] steps
  private val enqPtrOHVec = VecInit.tabulate(RenameWidth + 1)(enqPtrOHShift.left)
  private val enqPtrVecNext = Wire(enqPtrVec.cloneType)

  // Dequeue (commit) pointers, same binary + one-hot scheme as enqueue.
  private val deqPtrVec = RegInit(VecInit.tabulate(CommitWidth)(idx => RenameBufferPtr(flag = false, idx)))
  private val deqPtr = deqPtrVec.head
  private val deqPtrOH = RegInit(1.U(size.W))
  private val deqPtrOHShift = CircularShift(deqPtrOH)
  private val deqPtrOHVec = VecInit.tabulate(CommitWidth + 1)(deqPtrOHShift.left)
  private val deqPtrVecNext = Wire(deqPtrVec.cloneType)
  // Invariant: the one-hot shadow must always mirror the binary pointer.
  XSError(deqPtr.toOH =/= deqPtrOH, p"wrong one-hot reg between $deqPtr and $deqPtrOH")

  // Walk pointer, advanced while restoring the speculative RAT.
  private val walkPtr = Reg(new RenameBufferPtr)
  private val walkPtrOH = walkPtr.toOH
  private val walkPtrOHVec = VecInit.tabulate(CommitWidth + 1)(CircularShift(walkPtrOH).left)
  private val walkPtrNext = Wire(new RenameBufferPtr)

  // Snapshots of enqPtr, taken on snapshot-enqueue; the selected one becomes
  // the walk start point on a snapshot-based redirect.
  private val walkPtrSnapshots = SnapshotGenerator(enqPtr, io.snpt.snptEnq, io.snpt.snptDeq, io.redirect.valid, io.snpt.flushVec)

  // We should extra walk these preg pairs which compressed in rob enq entry at last cycle after restored snapshots.
  // enq firstuop: b010100 --invert--> b101011 --keep only continuous 1s from head--> b000011
  // enq firstuop: b111101 --invert--> b000010 --keep only continuous 1s from head--> b000000
  private val enqCompressedLastCycleMask: UInt = VecInit(io.req.indices.map(i => io.req.slice(0, i + 1).map(!_.bits.firstUop).reduce(_ && _))).asUInt
  private val compressedLastRobEntryMaskSnapshots = SnapshotGenerator(enqCompressedLastCycleMask, io.snpt.snptEnq, io.snpt.snptDeq, io.redirect.valid, io.snpt.flushVec)
  private val compressedExtraWalkMask = compressedLastRobEntryMaskSnapshots(snptSelect)
  // Count the leading run of 1s in the mask (number of extra pairs to walk):
  // b111111 --Cat(x,1)--> b1111111 --Reverse--> b1111111 --PriorityEncoder--> 6.U
  // b001111 --Cat(x,1)--> b0011111 --Reverse--> b1111100 --PriorityEncoder--> 4.U
  // b000011 --Cat(x,1)--> b0000111 --Reverse--> b1110000 --PriorityEncoder--> 2.U
  // b000000 --Cat(x,1)--> b0000001 --Reverse--> b1000000 --PriorityEncoder--> 0.U
  private val compressedExtraWalkSize = PriorityMux(Reverse(Cat(compressedExtraWalkMask, 1.U(1.W))), (0 to RenameWidth).map(i => (RenameWidth - i).U))

  // vconfig pointer (one-hot only); tracks diffPtr and is used to locate the
  // vconfig-writing commit candidate.
  val vcfgPtrOH = RegInit(1.U(size.W))
  val vcfgPtrOHShift = CircularShift(vcfgPtrOH)
  // may shift [0, 2) steps
  val vcfgPtrOHVec = VecInit.tabulate(2)(vcfgPtrOHShift.left)

  // Difftest commit pointer: advances by the ROB's commit size while idle.
  val diffPtr = RegInit(0.U.asTypeOf(new RenameBufferPtr))
  val diffPtrNext = Wire(new RenameBufferPtr)
  // Regs
  val renameBuffer = Mem(size, new RenameBufferEntry)
  val renameBufferEntries = VecInit((0 until size) map (i => renameBuffer(i)))

  val s_idle :: s_special_walk :: s_walk :: Nil = Enum(3)
  val state = RegInit(s_idle)
  val stateNext = WireInit(state) // otherwise keep state value

  // Sticky flag: the ROB has signalled walk-end since the last redirect.
  private val robWalkEndReg = RegInit(false.B)
  private val robWalkEnd = io.fromRob.walkEnd || robWalkEndReg

  when(io.redirect.valid) {
    robWalkEndReg := false.B
  }.elsewhen(io.fromRob.walkEnd) {
    robWalkEndReg := true.B
  }

  // Only requests that actually write a register file allocate a RAB entry.
  val realNeedAlloc = io.req.map(req => req.valid && req.bits.needWriteRf)
  val enqCount    = PopCount(realNeedAlloc)
  // The three counters below are mutually exclusive by construction of the
  // isCommit/isWalk qualifiers (special walk is the commit-AND-walk case).
  val commitCount = Mux(io.commits.isCommit && !io.commits.isWalk, PopCount(io.commits.commitValid), 0.U)
  val walkCount   = Mux(io.commits.isWalk && !io.commits.isCommit, PopCount(io.commits.walkValid), 0.U)
  val specialWalkCount = Mux(io.commits.isCommit && io.commits.isWalk, PopCount(io.commits.walkValid), 0.U)

  // number of pair(ldest, pdest) ready to commit to arch_rat
  val commitSize = RegInit(0.U(log2Up(size).W))
  val walkSize = RegInit(0.U(log2Up(size).W))
  val specialWalkSize = RegInit(0.U(log2Up(size).W))

  val newCommitSize = io.fromRob.commitSize
  val newWalkSize = io.fromRob.walkSize

  val commitSizeNxt = commitSize + newCommitSize - commitCount
  val walkSizeNxt = walkSize + newWalkSize - walkCount

  // On a non-snapshot redirect, everything still pending commit becomes
  // special-walk work (committed to arch RAT and walked at the same time).
  val newSpecialWalkSize = Mux(io.redirect.valid && !io.snpt.useSnpt, commitSizeNxt, 0.U)
  val specialWalkSizeNext = specialWalkSize + newSpecialWalkSize - specialWalkCount

  commitSize := Mux(io.redirect.valid && !io.snpt.useSnpt, 0.U, commitSizeNxt)
  specialWalkSize := specialWalkSizeNext
  walkSize := Mux(io.redirect.valid, Mux(io.snpt.useSnpt, compressedExtraWalkSize, 0.U), walkSizeNxt)

  // Walk pointer source, in priority order: entering s_walk from idle loads
  // the selected snapshot; leaving special walk continues from the dequeue
  // side; a fresh snapshot redirect during a walk reloads the snapshot;
  // otherwise advance by the number of walked pairs.
  walkPtrNext := MuxCase(walkPtr, Seq(
    (state === s_idle && stateNext === s_walk) -> walkPtrSnapshots(snptSelect),
    (state === s_special_walk && stateNext === s_walk) -> deqPtrVecNext.head,
    (state === s_walk && io.snpt.useSnpt && io.redirect.valid) -> walkPtrSnapshots(snptSelect),
    (state === s_walk) -> (walkPtr + walkCount),
  ))

  walkPtr := walkPtrNext

  // Entry read ports, selected by the one-hot pointer shadows.
  val walkCandidates   = VecInit(walkPtrOHVec.map(sel => Mux1H(sel, renameBufferEntries)))
  val commitCandidates = VecInit(deqPtrOHVec.map(sel => Mux1H(sel, renameBufferEntries)))
  val vcfgCandidates   = VecInit(vcfgPtrOHVec.map(sel => Mux1H(sel, renameBufferEntries)))

  // update diff pointer
  diffPtrNext := Mux(state === s_idle, diffPtr + newCommitSize, diffPtr)
  diffPtr := diffPtrNext

  // update vcfg pointer
  // TODO: do not use diffPtrNext here
  vcfgPtrOH := diffPtrNext.toOH

  // update enq pointer: when a walk finishes, enqueue resumes at the final
  // walk position; otherwise advance by the number of allocated entries.
  val enqPtrNext = Mux(
    state === s_walk && stateNext === s_idle,
    walkPtrNext,
    enqPtr + enqCount
  )
  val enqPtrOHNext = Mux(
    state === s_walk && stateNext === s_idle,
    walkPtrNext.toOH,
    enqPtrOHVec(enqCount)
  )
  enqPtr := enqPtrNext
  enqPtrOH := enqPtrOHNext
  // Side-effecting iteration: use foreach (was `.map`, whose result was discarded).
  enqPtrVecNext.zipWithIndex.foreach { case (ptr, i) => ptr := enqPtrNext + i.U }
  enqPtrVec := enqPtrVecNext

  // Dequeue advances on normal commit (idle) or special walk; never both.
  val deqPtrSteps = Mux1H(Seq(
    (state === s_idle) -> commitCount,
    (state === s_special_walk) -> specialWalkCount,
  ))

  // update deq pointer
  val deqPtrNext = deqPtr + deqPtrSteps
  val deqPtrOHNext = deqPtrOHVec(deqPtrSteps)
  deqPtr := deqPtrNext
  deqPtrOH := deqPtrOHNext
  deqPtrVecNext.zipWithIndex.foreach { case (ptr, i) => ptr := deqPtrNext + i.U }
  deqPtrVec := deqPtrVecNext

  // Write enqueued pairs. Each allocating request lands at the slot indexed by
  // the number of allocating requests before it (holes are compressed out).
  // NOTE: closure parameter renamed from `realNeedAlloc`, which shadowed the
  // outer val of the same name.
  val allocatePtrVec = VecInit((0 until RenameWidth).map(i => enqPtrVec(PopCount(realNeedAlloc.take(i))).value))
  allocatePtrVec.zip(io.req).zip(realNeedAlloc).foreach { case ((allocatePtr, req), needAlloc) =>
    when(needAlloc) {
      renameBuffer(allocatePtr).ldest := req.bits.ldest
      renameBuffer(allocatePtr).pdest := req.bits.pdest
      renameBuffer(allocatePtr).rfWen := req.bits.rfWen
      renameBuffer(allocatePtr).fpWen := req.bits.fpWen
      renameBuffer(allocatePtr).vecWen := req.bits.vecWen
      renameBuffer(allocatePtr).isMove := req.bits.eliminatedMove
      renameBuffer(allocatePtr).robIdx := req.bits.robIdx
    }
  }

  io.commits.isCommit := state === s_idle || state === s_special_walk
  io.commits.isWalk := state === s_walk || state === s_special_walk

  for(i <- 0 until CommitWidth) {
    io.commits.commitValid(i) := state === s_idle && i.U < commitSize || state === s_special_walk && i.U < specialWalkSize
    io.commits.walkValid(i) := state === s_walk && i.U < walkSize || state === s_special_walk && i.U < specialWalkSize
    // special walk use commitPtr
    io.commits.info(i) := Mux(state === s_idle || state === s_special_walk, commitCandidates(i), walkCandidates(i))
    // Todo: remove this
    io.commits.robIdx(i) := Mux(state === s_idle || state === s_special_walk, commitCandidates(i).robIdx, walkCandidates(i).robIdx)
  }

  private val walkEndNext = walkSizeNxt === 0.U
  private val specialWalkEndNext = specialWalkSizeNext === 0.U

  // change state
  state := stateNext
  when(io.redirect.valid) {
    when(io.snpt.useSnpt) {
      stateNext := s_walk
    }.otherwise {
      stateNext := s_special_walk
    }
  }.otherwise {
    // change stateNext
    switch(state) {
      // this transaction is not used actually, just list all states
      is(s_idle) {
        stateNext := s_idle
      }
      is(s_special_walk) {
        when(specialWalkEndNext) {
          stateNext := s_walk
        }
      }
      is(s_walk) {
        // Leave walk only when both the ROB and the RAB have drained.
        when(robWalkEnd && walkEndNext) {
          stateNext := s_idle
        }
      }
    }
  }

  val numValidEntries = distanceBetween(enqPtr, deqPtr)
  // Registered for timing; conservatively reserves RenameWidth slots.
  val allowEnqueue = RegNext(numValidEntries + enqCount <= (size - RenameWidth).U, true.B)

  io.canEnq := allowEnqueue && state === s_idle
  io.enqPtrVec := enqPtrVec

  io.status.walkEnd := walkEndNext

  io.vconfigPdest := Mux(vcfgCandidates(0).ldest === VCONFIG_IDX.U && vcfgCandidates(0).vecWen, vcfgCandidates(0).pdest, vcfgCandidates(1).pdest)

  // for difftest
  io.diffCommits := 0.U.asTypeOf(new DiffCommitIO)
  io.diffCommits.isCommit := state === s_idle || state === s_special_walk
  for(i <- 0 until CommitWidth * MaxUopSize) {
    io.diffCommits.commitValid(i) := (state === s_idle || state === s_special_walk) && i.U < newCommitSize
    io.diffCommits.info(i) := renameBufferEntries((diffPtr + i.U).value)
  }

  XSError(isBefore(enqPtr, deqPtr) && !isFull(enqPtr, deqPtr), "\ndeqPtr is older than enqPtr!\n")

  // Use the module's own `size` parameter (was the global RabSize) for
  // consistency with the rest of this parameterized module.
  QueuePerf(size, numValidEntries, numValidEntries === size.U)

  XSPerfAccumulate("s_idle_to_idle", state === s_idle         && stateNext === s_idle)
  XSPerfAccumulate("s_idle_to_swlk", state === s_idle         && stateNext === s_special_walk)
  XSPerfAccumulate("s_idle_to_walk", state === s_idle         && stateNext === s_walk)
  XSPerfAccumulate("s_swlk_to_idle", state === s_special_walk && stateNext === s_idle)
  XSPerfAccumulate("s_swlk_to_swlk", state === s_special_walk && stateNext === s_special_walk)
  XSPerfAccumulate("s_swlk_to_walk", state === s_special_walk && stateNext === s_walk)
  XSPerfAccumulate("s_walk_to_idle", state === s_walk         && stateNext === s_idle)
  XSPerfAccumulate("s_walk_to_swlk", state === s_walk         && stateNext === s_special_walk)
  XSPerfAccumulate("s_walk_to_walk", state === s_walk         && stateNext === s_walk)

  XSPerfAccumulate("disallow_enq_cycle", !allowEnqueue)
  XSPerfAccumulate("disallow_enq_full_cycle", numValidEntries + enqCount > (size - RenameWidth).U)
  XSPerfAccumulate("disallow_enq_not_idle_cycle", state =/= s_idle)
}
284