// /XiangShan/src/main/scala/xiangshan/backend/rob/Rab.scala (revision bb2f3f51dd67f6e16e0cc1ffe43368c9fc7e4aef)
package xiangshan.backend.rob

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import utils._
import utility._
import xiangshan.backend.Bundles.DynInst
import xiangshan.backend.decode.VectorConstants
import xiangshan.backend.rename.SnapshotGenerator

class RenameBufferPtr(size: Int) extends CircularQueuePtr[RenameBufferPtr](size) {
  def this()(implicit p: Parameters) = this(p(XSCoreParamsKey).RabSize)
}

object RenameBufferPtr {
  def apply(flag: Boolean = false, v: Int = 0)(implicit p: Parameters): RenameBufferPtr = {
    val ptr = Wire(new RenameBufferPtr(p(XSCoreParamsKey).RabSize))
    ptr.flag := flag.B
    ptr.value := v.U
    ptr
  }
}

class RenameBufferEntry(implicit p: Parameters) extends XSBundle {
  val info = new RabCommitInfo
  val robIdx = OptionWrapper(!env.FPGAPlatform, new RobPtr)
}

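// RenameBuffer (RAB): a circular buffer that holds rename information
// (RabCommitInfo, essentially the (ldest, pdest) mapping of each renamed uop)
// between rename and retirement. Entries are allocated at rename time and are
// released either by committing them to the architectural rename table or by
// walking them back after a redirect.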
class RenameBuffer(size: Int)(implicit p: Parameters) extends XSModule with HasCircularQueuePtrHelper {
  val io = IO(new Bundle {
    // only the valid bit of the redirect is used, so the payload bundle is empty
    val redirect = Input(ValidIO(new Bundle {}))

    val req = Vec(RenameWidth, Flipped(ValidIO(new DynInst)))
    val fromRob = new Bundle {
      val walkSize = Input(UInt(log2Up(size).W))
      val walkEnd = Input(Bool())
      val commitSize = Input(UInt(log2Up(size).W))
    }

    val snpt = Input(new SnapshotPort)

    val canEnq = Output(Bool())
    val enqPtrVec = Output(Vec(RenameWidth, new RenameBufferPtr))

    val commits = Output(new RabCommitIO)
    val diffCommits = if (backendParams.debugEn) Some(Output(new DiffCommitIO)) else None

    val status = Output(new Bundle {
      val walkEnd = Bool()
    })
  })
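
  // Interface summary: io.req carries up to RenameWidth renamed uops per cycle,
  // io.canEnq indicates whether they may be allocated, io.fromRob reports how
  // many entries become committable or must be walked each cycle, and
  // io.commits drives up to RabCommitWidth commit/walk records per cycle.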

  // alias
  private val snptSelect = io.snpt.snptSelect

  // pointer
  private val enqPtrVec = RegInit(VecInit.tabulate(RenameWidth)(idx => RenameBufferPtr(flag = false, idx)))
  private val enqPtr = enqPtrVec.head
  private val enqPtrOH = RegInit(1.U(size.W))
  private val enqPtrOHShift = CircularShift(enqPtrOH)
  // may shift [0, RenameWidth] steps
  private val enqPtrOHVec = VecInit.tabulate(RenameWidth + 1)(enqPtrOHShift.left)
  private val enqPtrVecNext = Wire(enqPtrVec.cloneType)

  private val deqPtrVec = RegInit(VecInit.tabulate(RabCommitWidth)(idx => RenameBufferPtr(flag = false, idx)))
  private val deqPtr = deqPtrVec.head
  private val deqPtrOH = RegInit(1.U(size.W))
  private val deqPtrOHShift = CircularShift(deqPtrOH)
  private val deqPtrOHVec = VecInit.tabulate(RabCommitWidth + 1)(deqPtrOHShift.left)
  private val deqPtrVecNext = Wire(deqPtrVec.cloneType)
  XSError(deqPtr.toOH =/= deqPtrOH, p"wrong one-hot reg between $deqPtr and $deqPtrOH")
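
  // Note: each pointer is kept in two redundant forms: a binary CircularQueuePtr
  // (used for arithmetic and ordering checks) and a one-hot register of width
  // `size` (used to select entries with Mux1H, see the *Candidates vectors
  // below). The XSError above checks that the two encodings never diverge.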

  private val walkPtr = Reg(new RenameBufferPtr)
  private val walkPtrOH = walkPtr.toOH
  private val walkPtrOHVec = VecInit.tabulate(RabCommitWidth + 1)(CircularShift(walkPtrOH).left)
  private val walkPtrNext = Wire(new RenameBufferPtr)

  private val walkPtrSnapshots = SnapshotGenerator(enqPtr, io.snpt.snptEnq, io.snpt.snptDeq, io.redirect.valid, io.snpt.flushVec)

  val vcfgPtrOH = RegInit(1.U(size.W))
  val vcfgPtrOHShift = CircularShift(vcfgPtrOH)
  // may shift [0, 2) steps
  val vcfgPtrOHVec = VecInit.tabulate(2)(vcfgPtrOHShift.left)

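  // diffPtr points at the next entry to be reported through io.diffCommits;
  // vcfgPtrOH shadows it (see the TODO near its update), presumably so that
  // committed vector-config entries can be selected via vcfgCandidates, which
  // is otherwise unreferenced in this file.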
  val diffPtr = RegInit(0.U.asTypeOf(new RenameBufferPtr))
  val diffPtrNext = Wire(new RenameBufferPtr)
  // Regs
  val renameBuffer = Reg(Vec(size, new RenameBufferEntry))
  val renameBufferEntries = VecInit((0 until size) map (i => renameBuffer(i)))

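  // State machine overview:
  //  - s_idle: normal operation; committable entries are written to the
  //    architectural rename table from the deq pointer.
  //  - s_walk: entered on a redirect that can restore from a rename snapshot;
  //    the walk pointer is restored from the selected snapshot and entries are
  //    sent out as walk records until the ROB has finished its own walk
  //    (robWalkEnd) and walkSize has drained to zero.
  //  - s_special_walk: entered on a redirect that cannot use a snapshot;
  //    entries the ROB has already reported as committable are drained through
  //    the commit port first (isCommit and isWalk both asserted), then the
  //    state falls through to s_walk.
  // robWalkEndReg latches io.fromRob.walkEnd because the ROB may finish
  // sending walk info before the RAB has drained its own walkSize.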
  val s_idle :: s_special_walk :: s_walk :: Nil = Enum(3)
  val state = RegInit(s_idle)
  val stateNext = WireInit(state) // otherwise keep state value

  private val robWalkEndReg = RegInit(false.B)
  private val robWalkEnd = io.fromRob.walkEnd || robWalkEndReg

  when(io.redirect.valid) {
    robWalkEndReg := false.B
  }.elsewhen(io.fromRob.walkEnd) {
    robWalkEndReg := true.B
  }

  val realNeedAlloc = io.req.map(req => req.valid && req.bits.needWriteRf)
  val enqCount    = PopCount(realNeedAlloc)
  val commitNum = Wire(UInt(3.W))
  val walkNum = Wire(UInt(3.W))
  // commitNum/walkNum = index of the highest asserted valid bit + 1 (the valid
  // bits are contiguous from bit 0); NOTE: the hard-coded 6 and the 3-bit width
  // assume RabCommitWidth == 6
  commitNum := Mux(io.commits.commitValid(0), PriorityMux((0 until 6).map(
    i => io.commits.commitValid(5-i) -> (6-i).U
  )), 0.U)
  walkNum := Mux(io.commits.walkValid(0), PriorityMux((0 until 6).map(
    i => io.commits.walkValid(5-i) -> (6-i).U
  )), 0.U)
  val commitCount = Mux(io.commits.isCommit && !io.commits.isWalk, commitNum, 0.U)
  val walkCount   = Mux(io.commits.isWalk && !io.commits.isCommit, walkNum, 0.U)
  val specialWalkCount = Mux(io.commits.isCommit && io.commits.isWalk, walkNum, 0.U)

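  // Bookkeeping example (assuming RabCommitWidth == 6): if the ROB reports
  // newCommitSize = 8 in one cycle, commitSize becomes 8; in the next s_idle
  // cycle all 6 commitValid bits are set, so commitCount = 6, and the remaining
  // 2 entries are committed one cycle later.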
  // number of (ldest, pdest) pairs ready to commit to arch_rat
  val commitSize = RegInit(0.U(log2Up(size).W))
  val walkSize = RegInit(0.U(log2Up(size).W))
  val specialWalkSize = RegInit(0.U(log2Up(size).W))

  val newCommitSize = io.fromRob.commitSize
  val newWalkSize = io.fromRob.walkSize

  val commitSizeNxt = commitSize + newCommitSize - commitCount
  val walkSizeNxt = walkSize + newWalkSize - walkCount

  val newSpecialWalkSize = Mux(io.redirect.valid && !io.snpt.useSnpt, commitSizeNxt, 0.U)
  val specialWalkSizeNext = specialWalkSize + newSpecialWalkSize - specialWalkCount

  commitSize := Mux(io.redirect.valid && !io.snpt.useSnpt, 0.U, commitSizeNxt)
  specialWalkSize := specialWalkSizeNext
  walkSize := Mux(io.redirect.valid, 0.U, walkSizeNxt)

  walkPtrNext := MuxCase(walkPtr, Seq(
    (state === s_idle && stateNext === s_walk) -> walkPtrSnapshots(snptSelect),
    (state === s_special_walk && stateNext === s_walk) -> deqPtrVecNext.head,
    (state === s_walk && io.snpt.useSnpt && io.redirect.valid) -> walkPtrSnapshots(snptSelect),
    (state === s_walk) -> (walkPtr + walkCount),
  ))

  walkPtr := walkPtrNext

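  // walkPtr sources, in priority order: entering s_walk from s_idle restores
  // the snapshotted enqueue pointer; leaving s_special_walk continues from the
  // updated dequeue pointer; a new snapshot-based redirect during s_walk
  // reloads a snapshot; otherwise the walk pointer advances by the number of
  // entries walked this cycle.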
  val walkCandidates   = VecInit(walkPtrOHVec.map(sel => Mux1H(sel, renameBufferEntries)))
  val commitCandidates = VecInit(deqPtrOHVec.map(sel => Mux1H(sel, renameBufferEntries)))
  val vcfgCandidates   = VecInit(vcfgPtrOHVec.map(sel => Mux1H(sel, renameBufferEntries)))

  // update diff pointer
  diffPtrNext := Mux(state === s_idle, diffPtr + newCommitSize, diffPtr)
  diffPtr := diffPtrNext

  // update vcfg pointer
  // TODO: do not use diffPtrNext here
  vcfgPtrOH := diffPtrNext.toOH

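  // When a walk finishes (s_walk -> s_idle), the enqueue pointer is rewound to
  // the restored walk pointer so that squashed entries are overwritten by the
  // next allocations; otherwise it advances by the number of allocated entries.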
  // update enq pointer
  val enqPtrNext = Mux(
    state === s_walk && stateNext === s_idle,
    walkPtrNext,
    enqPtr + enqCount
  )
  val enqPtrOHNext = Mux(
    state === s_walk && stateNext === s_idle,
    walkPtrNext.toOH,
    enqPtrOHVec(enqCount)
  )
  enqPtr := enqPtrNext
  enqPtrOH := enqPtrOHNext
  enqPtrVecNext.zipWithIndex.foreach { case (ptr, i) => ptr := enqPtrNext + i.U }
  enqPtrVec := enqPtrVecNext

  val deqPtrSteps = Mux1H(Seq(
    (state === s_idle) -> commitCount,
    (state === s_special_walk) -> specialWalkCount,
  ))

  // update deq pointer
  val deqPtrNext = deqPtr + deqPtrSteps
  val deqPtrOHNext = deqPtrOHVec(deqPtrSteps)
  deqPtr := deqPtrNext
  deqPtrOH := deqPtrOHNext
  deqPtrVecNext.zipWithIndex.foreach { case (ptr, i) => ptr := deqPtrNext + i.U }
  deqPtrVec := deqPtrVecNext

  val allocatePtrVec = VecInit((0 until RenameWidth).map(i => enqPtrVec(PopCount(realNeedAlloc.take(i))).value))
  allocatePtrVec.zip(io.req).zip(realNeedAlloc).foreach { case ((allocatePtr, req), needAlloc) =>
    when(needAlloc) {
      renameBuffer(allocatePtr).info := req.bits
      renameBuffer(allocatePtr).robIdx.foreach(_ := req.bits.robIdx)
    }
  }

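  // Commit/walk outputs: s_idle drives commitValid only, s_walk drives
  // walkValid only, and s_special_walk asserts both for the same entries
  // (read through the deq pointer), leaving it to the rename table to treat
  // them as committed while the pipeline is being walked.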
  io.commits.isCommit := state === s_idle || state === s_special_walk
  io.commits.isWalk := state === s_walk || state === s_special_walk

  for(i <- 0 until RabCommitWidth) {
    io.commits.commitValid(i) := state === s_idle && i.U < commitSize || state === s_special_walk && i.U < specialWalkSize
    io.commits.walkValid(i) := state === s_walk && i.U < walkSize || state === s_special_walk && i.U < specialWalkSize
    // special walk reads entries through the commit (deq) pointer
    io.commits.info(i) := Mux(state === s_idle || state === s_special_walk, commitCandidates(i).info, walkCandidates(i).info)
    io.commits.robIdx.foreach(_(i) := Mux(state === s_idle || state === s_special_walk, commitCandidates(i).robIdx.get, walkCandidates(i).robIdx.get))
  }

  private val walkEndNext = walkSizeNxt === 0.U
  private val specialWalkEndNext = specialWalkSizeNext === 0.U

  // change state
  state := stateNext
  when(io.redirect.valid) {
    when(io.snpt.useSnpt) {
      stateNext := s_walk
    }.otherwise {
      stateNext := s_special_walk
    }
  }.otherwise {
    // change stateNext
    switch(state) {
      // this assignment is redundant (stateNext already defaults to state); listed only for completeness
      is(s_idle) {
        stateNext := s_idle
      }
      is(s_special_walk) {
        when(specialWalkEndNext) {
          stateNext := s_walk
        }
      }
      is(s_walk) {
        when(robWalkEnd && walkEndNext) {
          stateNext := s_idle
        }
      }
    }
  }

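  // Back-pressure: the enqueue permission is registered (one cycle late) and
  // keeps RenameWidth entries of headroom, so the delayed decision cannot let
  // the buffer overflow.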
  val numValidEntries = distanceBetween(enqPtr, deqPtr)
  val allowEnqueue = GatedValidRegNext(numValidEntries + enqCount <= (size - RenameWidth).U, true.B)

  io.canEnq := allowEnqueue && state === s_idle
  io.enqPtrVec := enqPtrVec

  io.status.walkEnd := walkEndNext

  // for difftest
  io.diffCommits.foreach(_ := 0.U.asTypeOf(new DiffCommitIO))
  io.diffCommits.foreach(_.isCommit := state === s_idle || state === s_special_walk)
  for(i <- 0 until RabCommitWidth * MaxUopSize) {
    io.diffCommits.foreach(_.commitValid(i) := (state === s_idle || state === s_special_walk) && i.U < newCommitSize)
    io.diffCommits.foreach(_.info(i) := renameBufferEntries((diffPtr + i.U).value).info)
  }

  XSError(isBefore(enqPtr, deqPtr) && !isFull(enqPtr, deqPtr), "\ndeqPtr is ahead of enqPtr!\n")

  QueuePerf(RabSize, numValidEntries, numValidEntries === size.U)

  if (backendParams.debugEn) {
    dontTouch(deqPtrVec)
    dontTouch(walkPtrNext)
  }

  XSPerfAccumulate("s_idle_to_idle", state === s_idle         && stateNext === s_idle)
  XSPerfAccumulate("s_idle_to_swlk", state === s_idle         && stateNext === s_special_walk)
  XSPerfAccumulate("s_idle_to_walk", state === s_idle         && stateNext === s_walk)
  XSPerfAccumulate("s_swlk_to_idle", state === s_special_walk && stateNext === s_idle)
  XSPerfAccumulate("s_swlk_to_swlk", state === s_special_walk && stateNext === s_special_walk)
  XSPerfAccumulate("s_swlk_to_walk", state === s_special_walk && stateNext === s_walk)
  XSPerfAccumulate("s_walk_to_idle", state === s_walk         && stateNext === s_idle)
  XSPerfAccumulate("s_walk_to_swlk", state === s_walk         && stateNext === s_special_walk)
  XSPerfAccumulate("s_walk_to_walk", state === s_walk         && stateNext === s_walk)

  XSPerfAccumulate("disallow_enq_cycle", !allowEnqueue)
  XSPerfAccumulate("disallow_enq_full_cycle", numValidEntries + enqCount > (size - RenameWidth).U)
  XSPerfAccumulate("disallow_enq_not_idle_cycle", state =/= s_idle)
}