// xref: /XiangShan/src/main/scala/xiangshan/backend/rob/Rab.scala (revision 1bc48dd1fa0af361fd194c65bad3b86349ec2903)
package xiangshan.backend.rob

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import utils._
import utility._
import xiangshan.backend.Bundles.DynInst
import xiangshan.backend.{RabToVecExcpMod, RegWriteFromRab}
import xiangshan.backend.decode.VectorConstants
import xiangshan.backend.rename.SnapshotGenerator
import chisel3.experimental.BundleLiterals._

/**
  * Circular pointer into the Rename Buffer (RAB).
  *
  * Inherits the wrap flag / value fields and wrap-aware comparison helpers
  * from [[CircularQueuePtr]]; `size` is the depth of the pointed-to queue.
  */
class RenameBufferPtr(size: Int) extends CircularQueuePtr[RenameBufferPtr](size) {
  // Auxiliary constructor: derive the queue depth from the core configuration (RabSize).
  def this()(implicit p: Parameters) = this(p(XSCoreParamsKey).RabSize)
}
18
object RenameBufferPtr {
  /**
    * Build a RAB pointer wire initialized to the given wrap flag and index.
    *
    * @param flag initial wrap flag (defaults to false)
    * @param v    initial entry index (defaults to 0)
    */
  def apply(flag: Boolean = false, v: Int = 0)(implicit p: Parameters): RenameBufferPtr = {
    val rabSize = p(XSCoreParamsKey).RabSize
    val result = Wire(new RenameBufferPtr(rabSize))
    result.value := v.U
    result.flag := flag.B
    result
  }
}
27
/**
  * One entry of the Rename Buffer.
  */
class RenameBufferEntry(implicit p: Parameters) extends XSBundle {
  // Rename commit information carried from dispatch to commit/walk.
  val info = new RabCommitInfo
  // ROB index of the owning instruction; kept only for simulation/debug
  // builds (absent when targeting the FPGA platform).
  val robIdx = OptionWrapper(!env.FPGAPlatform, new RobPtr)
}
32
/**
  * Rename Buffer (RAB).
  *
  * Buffers the rename information (RabCommitInfo, i.e. (ldest, pdest) pairs)
  * of in-flight instructions between rename and commit.  Entries enqueue at
  * rename time (io.req) and are later either
  *   - committed (state s_idle),
  *   - walked back using a restored rename snapshot after a redirect
  *     (state s_walk), or
  *   - "special walked" after a redirect without a usable snapshot, in which
  *     case the still-pending commit entries are committed and walked
  *     simultaneously (state s_special_walk).
  *
  * @param size number of buffer entries (RabSize)
  */
class RenameBuffer(size: Int)(implicit p: Parameters) extends XSModule with HasCircularQueuePtrHelper {
  val io = IO(new Bundle {
    // Redirect (flush) request; only the valid bit is used, the payload bundle is empty.
    val redirect = Input(ValidIO(new Bundle {
    }))

    // Enqueue requests from rename, RenameWidth slots per cycle.
    val req = Vec(RenameWidth, Flipped(ValidIO(new DynInst)))
    val fromRob = new Bundle {
      // Number of entries the ROB asks to walk back this cycle.
      val walkSize = Input(UInt(log2Up(size).W))
      // ROB signals its own walk has finished.
      val walkEnd = Input(Bool())
      // Number of entries the ROB allows to commit this cycle.
      val commitSize = Input(UInt(log2Up(size).W))
      // Vector-load exception info; latched on redirect for the special walk.
      val vecLoadExcp = Input(ValidIO(new Bundle{
        val isStrided = Bool()
        val isVlm = Bool()
      }))
    }

    // Rename snapshot control (snapshot enq/deq/flush and selection).
    val snpt = Input(new SnapshotPort)

    // Backpressure to rename: room available and in normal (idle) state.
    val canEnq = Output(Bool())
    val enqPtrVec = Output(Vec(RenameWidth, new RenameBufferPtr))

    // Commit/walk interface towards the rename alias tables.
    val commits = Output(new RabCommitIO)
    // Difftest-only commit stream (wider than the normal commit port).
    val diffCommits = if (backendParams.basicDebugEn) Some(Output(new DiffCommitIO)) else None

    val status = Output(new Bundle {
      // High when the walk amount will reach zero after this cycle's step.
      val walkEnd = Bool()
    })
    // Logical->physical register mappings exported while handling a
    // vector-load exception.
    val toVecExcpMod = Output(new RabToVecExcpMod)
  })

  // alias
  private val snptSelect = io.snpt.snptSelect

  // pointer
  // Enqueue pointers: enqPtrVec(i) = enqPtr + i, one pre-computed pointer per
  // rename slot.  enqPtrOH mirrors enqPtr in one-hot form for fast selection.
  private val enqPtrVec = RegInit(VecInit.tabulate(RenameWidth)(idx => RenameBufferPtr(flag = false, idx)))
  private val enqPtr = enqPtrVec.head
  private val enqPtrOH = RegInit(1.U(size.W))
  private val enqPtrOHShift = CircularShift(enqPtrOH)
  // may shift [0, RenameWidth] steps
  private val enqPtrOHVec = VecInit.tabulate(RenameWidth + 1)(enqPtrOHShift.left)
  private val enqPtrVecNext = Wire(enqPtrVec.cloneType)

  // Dequeue (commit) pointers, same binary + one-hot scheme as enqueue.
  private val deqPtrVec = RegInit(VecInit.tabulate(RabCommitWidth)(idx => RenameBufferPtr(flag = false, idx)))
  private val deqPtr = deqPtrVec.head
  private val deqPtrOH = RegInit(1.U(size.W))
  private val deqPtrOHShift = CircularShift(deqPtrOH)
  private val deqPtrOHVec = VecInit.tabulate(RabCommitWidth + 1)(deqPtrOHShift.left)
  private val deqPtrVecNext = Wire(deqPtrVec.cloneType)
  // Simulation check: the binary and one-hot copies of deqPtr must agree.
  XSError(deqPtr.toOH =/= deqPtrOH, p"wrong one-hot reg between $deqPtr and $deqPtrOH")

  // Walk pointer: next entry to walk back; its one-hot form is derived
  // combinationally from the binary register (no separate one-hot register).
  private val walkPtr = Reg(new RenameBufferPtr)
  private val walkPtrOH = walkPtr.toOH
  private val walkPtrOHVec = VecInit.tabulate(RabCommitWidth + 1)(CircularShift(walkPtrOH).left)
  private val walkPtrNext = Wire(new RenameBufferPtr)

  // Snapshots of enqPtr taken when rename snapshots are created; used to
  // restore the walk pointer on a snapshot-based redirect.
  private val walkPtrSnapshots = SnapshotGenerator(enqPtr, io.snpt.snptEnq, io.snpt.snptDeq, io.redirect.valid, io.snpt.flushVec)

  // vcfg pointer (one-hot only); tracks diffPtrNext, see TODO below.
  val vcfgPtrOH = RegInit(1.U(size.W))
  val vcfgPtrOHShift = CircularShift(vcfgPtrOH)
  // may shift [0, 2) steps
  val vcfgPtrOHVec = VecInit.tabulate(2)(vcfgPtrOHShift.left)

  // Difftest commit pointer: tracks entries reported through io.diffCommits.
  val diffPtr = RegInit(0.U.asTypeOf(new RenameBufferPtr))
  val diffPtrNext = Wire(new RenameBufferPtr)
  // Regs
  val renameBuffer = Reg(Vec(size, new RenameBufferEntry))
  val renameBufferEntries = VecInit((0 until size) map (i => renameBuffer(i)))

  // Latched copy of fromRob.vecLoadExcp, captured on a non-snapshot redirect.
  val vecLoadExcp = Reg(io.fromRob.vecLoadExcp.cloneType)

  private val maxLMUL = 8
  private val vdIdxWidth = log2Up(maxLMUL + 1)
  val currentVdIdx = Reg(UInt(vdIdxWidth.W)) // store 0~8

  // State machine:
  //   s_idle         - normal operation, entries commit
  //   s_special_walk - redirect without usable snapshot: pending commits are
  //                    committed and walked at the same time
  //   s_walk         - walking back from a restored snapshot pointer
  val s_idle :: s_special_walk :: s_walk :: Nil = Enum(3)
  val state = RegInit(s_idle)
  val stateNext = WireInit(state) // otherwise keep state value

  // Sticky "ROB walk finished" flag, cleared on redirect.
  private val robWalkEndReg = RegInit(false.B)
  private val robWalkEnd = io.fromRob.walkEnd || robWalkEndReg

  when(io.redirect.valid) {
    robWalkEndReg := false.B
  }.elsewhen(io.fromRob.walkEnd) {
    robWalkEndReg := true.B
  }

  // Only requests that actually write a register consume RAB entries.
  val realNeedAlloc = io.req.map(req => req.valid && req.bits.needWriteRf)
  val enqCount    = PopCount(realNeedAlloc)
  // commitNum / walkNum: number of entries retired this cycle, found as the
  // highest asserted commitValid/walkValid slot index plus one.
  // NOTE(review): these wires are log2Up(RabCommitWidth) bits wide, which
  // cannot represent the value RabCommitWidth itself when RabCommitWidth is a
  // power of two — confirm RabCommitWidth is never a power of two here, or
  // widen to log2Up(RabCommitWidth + 1) bits.
  val commitNum = Wire(UInt(log2Up(RabCommitWidth).W))
  val walkNum = Wire(UInt(log2Up(RabCommitWidth).W))
  commitNum := Mux(io.commits.commitValid(0), PriorityMux((0 until RabCommitWidth).map(
    i => io.commits.commitValid(RabCommitWidth - 1 - i) -> (RabCommitWidth - i).U
  )), 0.U)
  walkNum := Mux(io.commits.walkValid(0), PriorityMux((0 until RabCommitWidth).map(
    i => io.commits.walkValid(RabCommitWidth - 1 - i) -> (RabCommitWidth-i).U
  )), 0.U)
  // Mutually exclusive counts: plain commit (isCommit only), plain walk
  // (isWalk only) and special walk (both asserted).
  val commitCount = Mux(io.commits.isCommit && !io.commits.isWalk, commitNum, 0.U)
  val walkCount   = Mux(io.commits.isWalk && !io.commits.isCommit, walkNum, 0.U)
  val specialWalkCount = Mux(io.commits.isCommit && io.commits.isWalk, walkNum, 0.U)

  // number of pair(ldest, pdest) ready to commit to arch_rat
  val commitSize = RegInit(0.U(log2Up(size).W))
  val walkSize = RegInit(0.U(log2Up(size).W))
  val specialWalkSize = RegInit(0.U(log2Up(size).W))

  val newCommitSize = io.fromRob.commitSize
  val newWalkSize = io.fromRob.walkSize

  // Running totals: add what the ROB grants, subtract what retired this cycle.
  val commitSizeNxt = commitSize + newCommitSize - commitCount
  val walkSizeNxt = walkSize + newWalkSize - walkCount

  // On a redirect that cannot use a snapshot, the still-pending commit amount
  // is converted into the special-walk amount.
  val newSpecialWalkSize = Mux(io.redirect.valid && !io.snpt.useSnpt, commitSizeNxt, 0.U)
  val specialWalkSizeNext = specialWalkSize + newSpecialWalkSize - specialWalkCount

  commitSize := Mux(io.redirect.valid && !io.snpt.useSnpt, 0.U, commitSizeNxt)
  specialWalkSize := specialWalkSizeNext
  walkSize := Mux(io.redirect.valid, 0.U, walkSizeNxt)

  // Walk pointer next-state: restore from a snapshot when (re)entering a walk,
  // continue from the dequeue pointer when a special walk turns into a walk,
  // otherwise advance by the number of entries walked this cycle.
  walkPtrNext := MuxCase(walkPtr, Seq(
    (state === s_idle && stateNext === s_walk) -> walkPtrSnapshots(snptSelect),
    (state === s_special_walk && stateNext === s_walk) -> deqPtrVecNext.head,
    (state === s_walk && io.snpt.useSnpt && io.redirect.valid) -> walkPtrSnapshots(snptSelect),
    (state === s_walk) -> (walkPtr + walkCount),
  ))

  walkPtr := walkPtrNext

  // Candidate entries selected through the shifted one-hot pointers.
  val walkCandidates   = VecInit(walkPtrOHVec.map(sel => Mux1H(sel, renameBufferEntries)))
  val commitCandidates = VecInit(deqPtrOHVec.map(sel => Mux1H(sel, renameBufferEntries)))
  val vcfgCandidates   = VecInit(vcfgPtrOHVec.map(sel => Mux1H(sel, renameBufferEntries)))

  // update diff pointer
  diffPtrNext := Mux(state === s_idle, diffPtr + newCommitSize, diffPtr)
  diffPtr := diffPtrNext

  // update vcfg pointer
  // TODO: do not use diffPtrNext here
  vcfgPtrOH := diffPtrNext.toOH

  // update enq pointer
  // When a walk finishes, enqueue resumes from the restored walk pointer;
  // otherwise it advances by this cycle's allocation count.
  val enqPtrNext = Mux(
    state === s_walk && stateNext === s_idle,
    walkPtrNext,
    enqPtr + enqCount
  )
  val enqPtrOHNext = Mux(
    state === s_walk && stateNext === s_idle,
    walkPtrNext.toOH,
    enqPtrOHVec(enqCount)
  )
  enqPtr := enqPtrNext
  enqPtrOH := enqPtrOHNext
  enqPtrVecNext.zipWithIndex.map{ case(ptr, i) => ptr := enqPtrNext + i.U }
  enqPtrVec := enqPtrVecNext

  // Dequeue pointer advances on normal commits (s_idle) and special walks
  // (s_special_walk); it does not move during a snapshot walk.
  val deqPtrSteps = Mux1H(Seq(
    (state === s_idle) -> commitCount,
    (state === s_special_walk) -> specialWalkCount,
  ))

  // update deq pointer
  val deqPtrNext = deqPtr + deqPtrSteps
  val deqPtrOHNext = deqPtrOHVec(deqPtrSteps)
  deqPtr := deqPtrNext
  deqPtrOH := deqPtrOHNext
  deqPtrVecNext.zipWithIndex.map{ case(ptr, i) => ptr := deqPtrNext + i.U }
  deqPtrVec := deqPtrVecNext

  // Enqueue write: slot i lands at enqPtr + (number of allocating slots
  // before i), compacting away the non-allocating rename slots.
  val allocatePtrVec = VecInit((0 until RenameWidth).map(i => enqPtrVec(PopCount(realNeedAlloc.take(i))).value))
  allocatePtrVec.zip(io.req).zip(realNeedAlloc).map{ case((allocatePtr, req), realNeedAlloc) =>
    when(realNeedAlloc){
      renameBuffer(allocatePtr).info := req.bits
      renameBuffer(allocatePtr).robIdx.foreach(_ := req.bits.robIdx)
    }
  }

  // Special walk asserts both isCommit and isWalk in the same cycle.
  io.commits.isCommit := state === s_idle || state === s_special_walk
  io.commits.isWalk := state === s_walk || state === s_special_walk

  for(i <- 0 until RabCommitWidth) {
    io.commits.commitValid(i) := state === s_idle && i.U < commitSize || state === s_special_walk && i.U < specialWalkSize
    io.commits.walkValid(i) := state === s_walk && i.U < walkSize || state === s_special_walk && i.U < specialWalkSize
    // special walk use commitPtr
    io.commits.info(i) := Mux(state === s_idle || state === s_special_walk, commitCandidates(i).info, walkCandidates(i).info)
    io.commits.robIdx.foreach(_(i) := Mux(state === s_idle || state === s_special_walk, commitCandidates(i).robIdx.get, walkCandidates(i).robIdx.get))
  }

  private val walkEndNext = walkSizeNxt === 0.U
  private val specialWalkEndNext = specialWalkSize <= RabCommitWidth.U
  // when robWalkEndReg is 1, walkSize donot increase and decrease RabCommitWidth per Cycle
  private val walkEndNextCycle = (robWalkEndReg || io.fromRob.walkEnd && io.fromRob.walkSize === 0.U) && (walkSize <= RabCommitWidth.U)
  // change state
  state := stateNext
  when(io.redirect.valid) {
    // Redirect: snapshot available -> plain walk; otherwise special walk,
    // also latching the vector-load exception info for that walk.
    when(io.snpt.useSnpt) {
      stateNext := s_walk
    }.otherwise {
      stateNext := s_special_walk
      vecLoadExcp := io.fromRob.vecLoadExcp
      when(io.fromRob.vecLoadExcp.valid) {
        currentVdIdx := 0.U
      }
    }
  }.otherwise {
    // change stateNext
    switch(state) {
      // this transaction is not used actually, just list all states
      is(s_idle) {
        stateNext := s_idle
      }
      is(s_special_walk) {
        // Track how many vector destination registers have been processed.
        currentVdIdx := currentVdIdx + specialWalkCount
        when(specialWalkEndNext) {
          stateNext := s_walk
          vecLoadExcp.valid := false.B
        }
      }
      is(s_walk) {
        when(walkEndNextCycle) {
          stateNext := s_idle
        }
      }
    }
  }

  val numValidEntries = distanceBetween(enqPtr, deqPtr)
  // Registered for timing; reserves RenameWidth slots so a full rename group
  // can still enqueue in the cycle after allowEnqueue is observed.
  val allowEnqueue = GatedValidRegNext(numValidEntries + enqCount <= (size - RenameWidth).U, true.B)

  io.canEnq := allowEnqueue && state === s_idle
  io.enqPtrVec := enqPtrVec

  io.status.walkEnd := walkEndNext

  // Export (ldest, pdest) mappings of special-walk commits while a vector
  // load exception is being handled.
  for (i <- 0 until RabCommitWidth) {
    io.toVecExcpMod.logicPhyRegMap(i).valid := (state === s_special_walk) && vecLoadExcp.valid &&
      io.commits.commitValid(i)
    io.toVecExcpMod.logicPhyRegMap(i).bits match {
      case x =>
        x.lreg := io.commits.info(i).ldest
        x.preg := io.commits.info(i).pdest
    }
  }

  // for difftest
  io.diffCommits.foreach(_ := 0.U.asTypeOf(new DiffCommitIO))
  io.diffCommits.foreach(_.isCommit := state === s_idle || state === s_special_walk)
  for(i <- 0 until RabCommitWidth * MaxUopSize) {
    io.diffCommits.foreach(_.commitValid(i) := (state === s_idle || state === s_special_walk) && i.U < newCommitSize)
    io.diffCommits.foreach(_.info(i) := renameBufferEntries((diffPtr + i.U).value).info)
  }

  XSError(isBefore(enqPtr, deqPtr) && !isFull(enqPtr, deqPtr), "\ndeqPtr is older than enqPtr!\n")

  QueuePerf(RabSize, numValidEntries, numValidEntries === size.U)

  if (backendParams.debugEn) {
    dontTouch(deqPtrVec)
    dontTouch(walkPtrNext)
    dontTouch(walkSizeNxt)
    dontTouch(walkEndNext)
    dontTouch(walkEndNextCycle)
  }

  // State-transition counters for performance analysis.
  XSPerfAccumulate("s_idle_to_idle", state === s_idle         && stateNext === s_idle)
  XSPerfAccumulate("s_idle_to_swlk", state === s_idle         && stateNext === s_special_walk)
  XSPerfAccumulate("s_idle_to_walk", state === s_idle         && stateNext === s_walk)
  XSPerfAccumulate("s_swlk_to_idle", state === s_special_walk && stateNext === s_idle)
  XSPerfAccumulate("s_swlk_to_swlk", state === s_special_walk && stateNext === s_special_walk)
  XSPerfAccumulate("s_swlk_to_walk", state === s_special_walk && stateNext === s_walk)
  XSPerfAccumulate("s_walk_to_idle", state === s_walk         && stateNext === s_idle)
  XSPerfAccumulate("s_walk_to_swlk", state === s_walk         && stateNext === s_special_walk)
  XSPerfAccumulate("s_walk_to_walk", state === s_walk         && stateNext === s_walk)

  XSPerfAccumulate("disallow_enq_cycle", !allowEnqueue)
  XSPerfAccumulate("disallow_enq_full_cycle", numValidEntries + enqCount > (size - RenameWidth).U)
  XSPerfAccumulate("disallow_enq_not_idle_cycle", state =/= s_idle)
}
311