xref: /XiangShan/src/main/scala/xiangshan/backend/rob/Rab.scala (revision 6639e9a467468f4e1b05a25a5de4500772aedeb1)
1package xiangshan.backend.rob
2
3import org.chipsalliance.cde.config.Parameters
4import chisel3._
5import chisel3.util._
6import xiangshan._
7import utils._
8import utility._
9import xiangshan.backend.Bundles.DynInst
10import xiangshan.backend.{RabToVecExcpMod, RegWriteFromRab}
11import xiangshan.backend.decode.VectorConstants
12import xiangshan.backend.rename.SnapshotGenerator
13import chisel3.experimental.BundleLiterals._
14
/**
 * Circular-queue pointer into the Rename Buffer (RAB).
 *
 * @param size queue depth; the auxiliary no-arg constructor reads `RabSize`
 *             from the core parameters so callers need not pass it explicitly.
 */
class RenameBufferPtr(size: Int) extends CircularQueuePtr[RenameBufferPtr](size) {
  def this()(implicit p: Parameters) = this(p(XSCoreParamsKey).RabSize)
}
18
object RenameBufferPtr {
  /**
   * Convenience factory: builds a [[RenameBufferPtr]] wire whose wrap flag and
   * index value are driven by the given Scala literals.
   *
   * @param flag initial wrap flag of the pointer
   * @param v    initial index value of the pointer
   */
  def apply(flag: Boolean = false, v: Int = 0)(implicit p: Parameters): RenameBufferPtr = {
    val rabSize = p(XSCoreParamsKey).RabSize
    val newPtr = Wire(new RenameBufferPtr(rabSize))
    // Drive the two pointer fields (order of these connections is immaterial:
    // they target distinct fields, so last-connect semantics never apply).
    newPtr.value := v.U
    newPtr.flag := flag.B
    newPtr
  }
}
27
/**
 * One RAB entry: the rename-commit payload, plus the producing instruction's
 * ROB index which is only materialized off-FPGA (debug/verification builds).
 */
class RenameBufferEntry(implicit p: Parameters) extends XSBundle {
  // commit info (ldest/pdest mapping etc.) captured from the DynInst at enqueue
  val info = new RabCommitInfo
  // ROB index of the producer; elided on FPGA to save area — presumably used
  // only for debug/difftest cross-checks (TODO confirm with consumers)
  val robIdx = OptionWrapper(!env.FPGAPlatform, new RobPtr)
}
32
/**
 * Rename Buffer (RAB).
 *
 * Queues the rename information (including the (ldest, pdest) pairs — see the
 * "ready to commit to arch_rat" note below) produced by rename, so it can be
 * committed once the ROB reports instructions as committed, or replayed
 * ("walked") to undo speculative renames after a redirect.
 *
 * State machine (see the `switch(state)` block):
 *  - s_idle:         normal operation; entries at deqPtr are committed as the
 *                    ROB reports `commitSize`.
 *  - s_walk:         rollback; the walk pointer starts from an enqPtr snapshot
 *                    (redirect with `useSnpt`) or from where special walk left
 *                    off, and walk entries are presented on `io.commits`.
 *  - s_special_walk: entered on a redirect without a usable snapshot; entries
 *                    already counted as committable are first drained (both
 *                    `isCommit` and `isWalk` are asserted) before moving on to
 *                    s_walk.
 *
 * @param size number of buffer entries (RabSize)
 */
class RenameBuffer(size: Int)(implicit p: Parameters) extends XSModule with HasCircularQueuePtrHelper {
  val io = IO(new Bundle {
    // redirect request; the payload bundle is empty — only the valid bit is used
    val redirect = Input(ValidIO(new Bundle {
    }))

    // enqueue requests from rename, one per rename lane
    val req = Vec(RenameWidth, Flipped(ValidIO(new DynInst)))
    val fromRob = new Bundle {
      val walkSize = Input(UInt(log2Up(size).W))   // entries the ROB adds to the walk budget this cycle
      val walkEnd = Input(Bool())                  // ROB-side walk has reached its end
      val commitSize = Input(UInt(log2Up(size).W)) // entries the ROB marks committable this cycle
      // vector-load exception descriptor; latched on redirect and consumed
      // while s_special_walk drains (drives io.toVecExcpMod)
      val vecLoadExcp = Input(ValidIO(new Bundle{
        val isStrided = Bool()
        val isVlm = Bool()
      }))
    }

    // snapshot control (enqueue/dequeue/select/flush of pointer snapshots)
    val snpt = Input(new SnapshotPort)

    val canEnq = Output(Bool())
    val enqPtrVec = Output(Vec(RenameWidth, new RenameBufferPtr))

    // commit/walk interface toward the rename alias tables
    val commits = Output(new RabCommitIO)
    val diffCommits = if (backendParams.basicDebugEn) Some(Output(new DiffCommitIO)) else None

    // end-of-walk / end-of-commit status back to the ROB
    val status = Output(new Bundle {
      val walkEnd = Bool()
      val commitEnd = Bool()
    })
    val toVecExcpMod = Output(new RabToVecExcpMod)
  })

  // alias
  private val snptSelect = io.snpt.snptSelect

  // pointer
  // Enqueue pointers: one per rename lane, lane i = enqPtr + i.
  // A one-hot shadow (enqPtrOH) is kept in step for cheap entry selection.
  private val enqPtrVec = RegInit(VecInit.tabulate(RenameWidth)(idx => RenameBufferPtr(flag = false, idx)))
  private val enqPtr = enqPtrVec.head
  private val enqPtrOH = RegInit(1.U(size.W))
  private val enqPtrOHShift = CircularShift(enqPtrOH)
  // may shift [0, RenameWidth] steps
  private val enqPtrOHVec = VecInit.tabulate(RenameWidth + 1)(enqPtrOHShift.left)
  private val enqPtrVecNext = Wire(enqPtrVec.cloneType)

  // Dequeue (commit) pointers: one per commit lane, with a one-hot shadow.
  private val deqPtrVec = RegInit(VecInit.tabulate(RabCommitWidth)(idx => RenameBufferPtr(flag = false, idx)))
  private val deqPtr = deqPtrVec.head
  private val deqPtrOH = RegInit(1.U(size.W))
  private val deqPtrOHShift = CircularShift(deqPtrOH)
  private val deqPtrOHVec = VecInit.tabulate(RabCommitWidth + 1)(deqPtrOHShift.left)
  private val deqPtrVecNext = Wire(deqPtrVec.cloneType)
  // sanity: the one-hot shadow must track the binary pointer
  XSError(deqPtr.toOH =/= deqPtrOH, p"wrong one-hot reg between $deqPtr and $deqPtrOH")

  // Walk pointer; its one-hot form is derived combinationally (not a shadow reg).
  private val walkPtr = Reg(new RenameBufferPtr)
  private val walkPtrOH = walkPtr.toOH
  private val walkPtrOHVec = VecInit.tabulate(RabCommitWidth + 1)(CircularShift(walkPtrOH).left)
  private val walkPtrNext = Wire(new RenameBufferPtr)

  // Snapshots of enqPtr, taken on snptEnq; a redirect with useSnpt restores
  // the walk pointer from the selected snapshot.
  private val walkPtrSnapshots = SnapshotGenerator(enqPtr, io.snpt.snptEnq, io.snpt.snptDeq, io.redirect.valid, io.snpt.flushVec)

  // vcfg pointer: one-hot only, advanced to diffPtrNext every cycle (see TODO below)
  val vcfgPtrOH = RegInit(1.U(size.W))
  val vcfgPtrOHShift = CircularShift(vcfgPtrOH)
  // may shift [0, 2) steps
  val vcfgPtrOHVec = VecInit.tabulate(2)(vcfgPtrOHShift.left)

  // difftest commit pointer: advances by the ROB-reported commit size every
  // cycle, independent of the local state machine
  val diffPtr = RegInit(0.U.asTypeOf(new RenameBufferPtr))
  val diffPtrNext = Wire(new RenameBufferPtr)
  // Regs
  val renameBuffer = Reg(Vec(size, new RenameBufferEntry))
  val renameBufferEntries = VecInit((0 until size) map (i => renameBuffer(i)))

  // latched copy of io.fromRob.vecLoadExcp (captured on redirect without snapshot)
  val vecLoadExcp = Reg(io.fromRob.vecLoadExcp.cloneType)

  private val maxLMUL = 8
  private val vdIdxWidth = log2Up(maxLMUL + 1)
  val currentVdIdx = Reg(UInt(vdIdxWidth.W)) // store 0~8

  val s_idle :: s_special_walk :: s_walk :: Nil = Enum(3)
  val state = RegInit(s_idle)
  val stateNext = WireInit(state) // otherwise keep state value

  // Sticky flag: the ROB has signalled the end of its walk (cleared on redirect).
  private val robWalkEndReg = RegInit(false.B)
  private val robWalkEnd = io.fromRob.walkEnd || robWalkEndReg

  when(io.redirect.valid) {
    robWalkEndReg := false.B
  }.elsewhen(io.fromRob.walkEnd) {
    robWalkEndReg := true.B
  }

  // Lanes that actually consume an entry: valid request that writes a register.
  val realNeedAlloc = io.req.map(req => req.valid && req.bits.needWriteRf)
  val enqCount    = PopCount(realNeedAlloc)
  // commitNum/walkNum = index of the highest asserted valid lane + 1
  // (PriorityMux scans from the top lane down), gated by lane 0 being valid.
  // NOTE(review): the log2Up(RabCommitWidth)-bit width cannot represent the
  // value RabCommitWidth itself when RabCommitWidth is a power of two — the
  // assignment would truncate to 0; confirm the configured RabCommitWidth.
  val commitNum = Wire(UInt(log2Up(RabCommitWidth).W))
  val walkNum = Wire(UInt(log2Up(RabCommitWidth).W))
  commitNum := Mux(io.commits.commitValid(0), PriorityMux((0 until RabCommitWidth).map(
    i => io.commits.commitValid(RabCommitWidth - 1 - i) -> (RabCommitWidth - i).U
  )), 0.U)
  walkNum := Mux(io.commits.walkValid(0), PriorityMux((0 until RabCommitWidth).map(
    i => io.commits.walkValid(RabCommitWidth - 1 - i) -> (RabCommitWidth-i).U
  )), 0.U)
  // commit-only, walk-only, and commit+walk (special walk) lane counts
  val commitCount = Mux(io.commits.isCommit && !io.commits.isWalk, commitNum, 0.U)
  val walkCount   = Mux(io.commits.isWalk && !io.commits.isCommit, walkNum, 0.U)
  val specialWalkCount = Mux(io.commits.isCommit && io.commits.isWalk, walkNum, 0.U)

  // number of pair(ldest, pdest) ready to commit to arch_rat
  val commitSize = RegInit(0.U(log2Up(size).W))
  val walkSize = RegInit(0.U(log2Up(size).W))
  val specialWalkSize = RegInit(0.U(log2Up(size).W))

  val newCommitSize = io.fromRob.commitSize
  val newWalkSize = io.fromRob.walkSize

  // running budgets: add what the ROB reports, subtract what we retire this cycle
  val commitSizeNxt = commitSize + newCommitSize - commitCount
  val walkSizeNxt = walkSize + newWalkSize - walkCount

  // On a redirect without snapshot, the still-pending commit budget becomes
  // the special-walk budget (those entries must be drained before walking).
  val newSpecialWalkSize = Mux(io.redirect.valid && !io.snpt.useSnpt, commitSizeNxt, 0.U)
  val specialWalkSizeNext = specialWalkSize + newSpecialWalkSize - specialWalkCount

  commitSize := Mux(io.redirect.valid && !io.snpt.useSnpt, 0.U, commitSizeNxt)
  specialWalkSize := specialWalkSizeNext
  walkSize := Mux(io.redirect.valid, 0.U, walkSizeNxt)

  // Walk-pointer update (first matching case wins):
  //  - entering s_walk from s_idle: restore from the selected snapshot
  //  - entering s_walk from s_special_walk: continue from the next deq pointer
  //  - redirect-with-snapshot while already walking: re-restore from snapshot
  //  - steady-state walking: advance by this cycle's walk count
  walkPtrNext := MuxCase(walkPtr, Seq(
    (state === s_idle && stateNext === s_walk) -> walkPtrSnapshots(snptSelect),
    (state === s_special_walk && stateNext === s_walk) -> deqPtrVecNext.head,
    (state === s_walk && io.snpt.useSnpt && io.redirect.valid) -> walkPtrSnapshots(snptSelect),
    (state === s_walk) -> (walkPtr + walkCount),
  ))

  walkPtr := walkPtrNext

  // Per-lane entry reads via one-hot selects (lane i = pointer + i).
  val walkCandidates   = VecInit(walkPtrOHVec.map(sel => Mux1H(sel, renameBufferEntries)))
  val commitCandidates = VecInit(deqPtrOHVec.map(sel => Mux1H(sel, renameBufferEntries)))
  val vcfgCandidates   = VecInit(vcfgPtrOHVec.map(sel => Mux1H(sel, renameBufferEntries)))

  // update diff pointer
  diffPtrNext := diffPtr + newCommitSize
  diffPtr := diffPtrNext

  // update vcfg pointer
  // TODO: do not use diffPtrNext here
  vcfgPtrOH := diffPtrNext.toOH

  // update enq pointer
  // When a walk completes (s_walk -> s_idle), enqueue restarts at the walk
  // pointer: everything younger than the walk point is reclaimed.
  val enqPtrNext = Mux(
    state === s_walk && stateNext === s_idle,
    walkPtrNext,
    enqPtr + enqCount
  )
  val enqPtrOHNext = Mux(
    state === s_walk && stateNext === s_idle,
    walkPtrNext.toOH,
    enqPtrOHVec(enqCount)
  )
  enqPtr := enqPtrNext
  enqPtrOH := enqPtrOHNext
  enqPtrVecNext.zipWithIndex.map{ case(ptr, i) => ptr := enqPtrNext + i.U }
  enqPtrVec := enqPtrVecNext

  // Deq advance: commits in s_idle, drained entries in s_special_walk,
  // zero otherwise (no Mux1H select asserted in s_walk).
  val deqPtrSteps = Mux1H(Seq(
    (state === s_idle) -> commitCount,
    (state === s_special_walk) -> specialWalkCount,
  ))

  // update deq pointer
  val deqPtrNext = deqPtr + deqPtrSteps
  val deqPtrOHNext = deqPtrOHVec(deqPtrSteps)
  deqPtr := deqPtrNext
  deqPtrOH := deqPtrOHNext
  deqPtrVecNext.zipWithIndex.map{ case(ptr, i) => ptr := deqPtrNext + i.U }
  deqPtrVec := deqPtrVecNext

  // Enqueue writes: lane i writes at enqPtr + (number of allocating lanes before i),
  // so allocating lanes pack densely regardless of which lanes are valid.
  val allocatePtrVec = VecInit((0 until RenameWidth).map(i => enqPtrVec(PopCount(realNeedAlloc.take(i))).value))
  allocatePtrVec.zip(io.req).zip(realNeedAlloc).map{ case((allocatePtr, req), realNeedAlloc) =>
    when(realNeedAlloc){
      renameBuffer(allocatePtr).info := req.bits
      renameBuffer(allocatePtr).robIdx.foreach(_ := req.bits.robIdx)
    }
  }

  // s_special_walk asserts both isCommit and isWalk simultaneously.
  io.commits.isCommit := state === s_idle || state === s_special_walk
  io.commits.isWalk := state === s_walk || state === s_special_walk

  for(i <- 0 until RabCommitWidth) {
    // lane i is active if it falls within the remaining budget of the current mode
    io.commits.commitValid(i) := state === s_idle && i.U < commitSize || state === s_special_walk && i.U < specialWalkSize
    io.commits.walkValid(i) := state === s_walk && i.U < walkSize || state === s_special_walk && i.U < specialWalkSize
    // special walk use commitPtr
    io.commits.info(i) := Mux(state === s_idle || state === s_special_walk, commitCandidates(i).info, walkCandidates(i).info)
    io.commits.robIdx.foreach(_(i) := Mux(state === s_idle || state === s_special_walk, commitCandidates(i).robIdx.get, walkCandidates(i).robIdx.get))
  }

  private val walkEndNext = walkSizeNxt === 0.U
  private val commitEndNext = commitSizeNxt === 0.U
  // special walk finishes once the remaining budget fits in one commit group
  private val specialWalkEndNext = specialWalkSize <= RabCommitWidth.U
  // once robWalkEndReg is set, walkSize no longer grows and shrinks by up to
  // RabCommitWidth per cycle, so "<= RabCommitWidth" means done next cycle
  private val walkEndNextCycle = (robWalkEndReg || io.fromRob.walkEnd && io.fromRob.walkSize === 0.U) && (walkSize <= RabCommitWidth.U)
  // change state
  state := stateNext
  when(io.redirect.valid) {
    when(io.snpt.useSnpt) {
      stateNext := s_walk
    }.otherwise {
      stateNext := s_special_walk
      // capture the vector-load exception context for the drain phase
      vecLoadExcp := io.fromRob.vecLoadExcp
      when(io.fromRob.vecLoadExcp.valid) {
        currentVdIdx := 0.U
      }
    }
  }.otherwise {
    // change stateNext
    switch(state) {
      // this transition is not used actually, just list all states
      is(s_idle) {
        stateNext := s_idle
      }
      is(s_special_walk) {
        // track how many vector destination registers have been drained
        currentVdIdx := currentVdIdx + specialWalkCount
        when(specialWalkEndNext) {
          stateNext := s_walk
          vecLoadExcp.valid := false.B
        }
      }
      is(s_walk) {
        when(walkEndNextCycle) {
          stateNext := s_idle
        }
      }
    }
  }

  // Registered full check with RenameWidth slack (conservative by one cycle).
  val numValidEntries = distanceBetween(enqPtr, deqPtr)
  val allowEnqueue = GatedValidRegNext(numValidEntries + enqCount <= (size - RenameWidth).U, true.B)

  io.canEnq := allowEnqueue && state === s_idle
  io.enqPtrVec := enqPtrVec

  io.status.walkEnd := walkEndNext
  io.status.commitEnd := commitEndNext

  // Report the logical->physical mapping of each drained lane to the vector
  // exception module while a vector-load exception special walk is in flight.
  for (i <- 0 until RabCommitWidth) {
    io.toVecExcpMod.logicPhyRegMap(i).valid := (state === s_special_walk) && vecLoadExcp.valid &&
      io.commits.commitValid(i)
    io.toVecExcpMod.logicPhyRegMap(i).bits match {
      case x =>
        x.lreg := io.commits.info(i).ldest
        x.preg := io.commits.info(i).pdest
    }
  }

  // for difftest
  io.diffCommits.foreach(_ := 0.U.asTypeOf(new DiffCommitIO))
  io.diffCommits.foreach(_.isCommit := true.B)
  for(i <- 0 until RabCommitWidth * MaxUopSize) {
    io.diffCommits.foreach(_.commitValid(i) := i.U < newCommitSize)
    io.diffCommits.foreach(_.info(i) := renameBufferEntries((diffPtr + i.U).value).info)
  }

  XSError(isBefore(enqPtr, deqPtr) && !isFull(enqPtr, deqPtr), "\ndeqPtr is older than enqPtr!\n")

  QueuePerf(RabSize, numValidEntries, numValidEntries === size.U)

  if (backendParams.debugEn) {
    dontTouch(deqPtrVec)
    dontTouch(walkPtrNext)
    dontTouch(walkSizeNxt)
    dontTouch(walkEndNext)
    dontTouch(walkEndNextCycle)
  }

  // state-transition counters for performance analysis
  XSPerfAccumulate("s_idle_to_idle", state === s_idle         && stateNext === s_idle)
  XSPerfAccumulate("s_idle_to_swlk", state === s_idle         && stateNext === s_special_walk)
  XSPerfAccumulate("s_idle_to_walk", state === s_idle         && stateNext === s_walk)
  XSPerfAccumulate("s_swlk_to_idle", state === s_special_walk && stateNext === s_idle)
  XSPerfAccumulate("s_swlk_to_swlk", state === s_special_walk && stateNext === s_special_walk)
  XSPerfAccumulate("s_swlk_to_walk", state === s_special_walk && stateNext === s_walk)
  XSPerfAccumulate("s_walk_to_idle", state === s_walk         && stateNext === s_idle)
  XSPerfAccumulate("s_walk_to_swlk", state === s_walk         && stateNext === s_special_walk)
  XSPerfAccumulate("s_walk_to_walk", state === s_walk         && stateNext === s_walk)

  XSPerfAccumulate("disallow_enq_cycle", !allowEnqueue)
  XSPerfAccumulate("disallow_enq_full_cycle", numValidEntries + enqCount > (size - RenameWidth).U)
  XSPerfAccumulate("disallow_enq_not_idle_cycle", state =/= s_idle)
}
314