xref: /XiangShan/src/main/scala/xiangshan/backend/rob/Rab.scala (revision c4b56310b9f6edacd8ee65bfdd7dd13b260a316c)
package xiangshan.backend.rob

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import utils._
import utility._
import xiangshan.backend.Bundles.DynInst
import xiangshan.backend.decode.VectorConstants
import xiangshan.backend.rename.SnapshotGenerator

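// Circular pointer into the rename buffer (RAB); when constructed without an
// explicit size it takes RabSize from the core parameters.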
class RenameBufferPtr(size: Int) extends CircularQueuePtr[RenameBufferPtr](size) {
  def this()(implicit p: Parameters) = this(p(XSCoreParamsKey).RabSize)
}

object RenameBufferPtr {
  def apply(flag: Boolean = false, v: Int = 0)(implicit p: Parameters): RenameBufferPtr = {
    val ptr = Wire(new RenameBufferPtr(p(XSCoreParamsKey).RabSize))
    ptr.flag := flag.B
    ptr.value := v.U
    ptr
  }
}

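// One RAB entry: the commit info needed to update the architectural rename
// state (ldest/pdest, write-enable flags, isMove) plus the ROB index of the
// instruction that produced it.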
class RenameBufferEntry(implicit p: Parameters) extends RobCommitInfo {
  val robIdx = new RobPtr
}

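// The Rename Buffer (RAB) records the (ldest, pdest) pairs produced at rename
// and releases them in program order: forwards to the architectural RAT on
// commit, backwards on walk after a redirect. The per-cycle commit/walk sizes
// and the walk-end signal come from the ROB; snapshots let a redirect restore
// the walk pointer directly instead of walking the whole buffer.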
class RenameBuffer(size: Int)(implicit p: Parameters) extends XSModule with HasCircularQueuePtrHelper {
  val io = IO(new Bundle {
    val redirect = Input(ValidIO(new Bundle {
    }))

    val req = Vec(RenameWidth, Flipped(ValidIO(new DynInst)))
    val fromRob = new Bundle {
      val walkSize = Input(UInt(log2Up(size).W))
      val walkEnd = Input(Bool())
      val commitSize = Input(UInt(log2Up(size).W))
    }

    val snpt = Input(new SnapshotPort)

    val canEnq = Output(Bool())
    val enqPtrVec = Output(Vec(RenameWidth, new RenameBufferPtr))
    val vconfigPdest = Output(UInt(PhyRegIdxWidth.W))
    val commits = Output(new RobCommitIO)
    val diffCommits = Output(new DiffCommitIO)

    val status = Output(new Bundle {
      val walkEnd = Bool()
    })
  })

  // alias
  private val snptSelect = io.snpt.snptSelect

  // pointer
  private val enqPtrVec = RegInit(VecInit.tabulate(RenameWidth)(idx => RenameBufferPtr(flag = false, idx)))
  private val enqPtr = enqPtrVec.head
  private val enqPtrOH = RegInit(1.U(size.W))
  private val enqPtrOHShift = CircularShift(enqPtrOH)
  // may shift [0, RenameWidth] steps
  private val enqPtrOHVec = VecInit.tabulate(RenameWidth + 1)(enqPtrOHShift.left)
  private val enqPtrVecNext = Wire(enqPtrVec.cloneType)

  private val deqPtrVec = RegInit(VecInit.tabulate(CommitWidth)(idx => RenameBufferPtr(flag = false, idx)))
  private val deqPtr = deqPtrVec.head
  private val deqPtrOH = RegInit(1.U(size.W))
  private val deqPtrOHShift = CircularShift(deqPtrOH)
  private val deqPtrOHVec = VecInit.tabulate(CommitWidth + 1)(deqPtrOHShift.left)
  private val deqPtrVecNext = Wire(deqPtrVec.cloneType)
  XSError(deqPtr.toOH =/= deqPtrOH, p"wrong one-hot reg between $deqPtr and $deqPtrOH")

  private val walkPtr = Reg(new RenameBufferPtr)
  private val walkPtrOH = walkPtr.toOH
  private val walkPtrOHVec = VecInit.tabulate(CommitWidth + 1)(CircularShift(walkPtrOH).left)
  private val walkPtrNext = Wire(new RenameBufferPtr)

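  // The enqueue pointer is snapshotted whenever the first uop of an enqueue
  // group requests a snapshot, so a later snapshot-based redirect can restore
  // the walk pointer from walkPtrSnapshots(snptSelect) directly.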
  private val snptEnq = io.canEnq && io.req.head.valid && io.req.head.bits.snapshot
  private val walkPtrSnapshots = SnapshotGenerator(enqPtr, snptEnq, io.snpt.snptDeq, io.redirect.valid, io.snpt.flushVec)

  // After restoring a snapshot, we must additionally walk the preg pairs that were compressed into the ROB entry enqueued in the previous cycle.
  // enq firstUop: b010100 --invert--> b101011 --keep only the continuous 1s starting from bit 0--> b000011
  // enq firstUop: b111101 --invert--> b000010 --keep only the continuous 1s starting from bit 0--> b000000
  private val enqCompressedLastCycleMask: UInt = VecInit(io.req.indices.map(i => io.req.slice(0, i + 1).map(!_.bits.firstUop).reduce(_ && _))).asUInt
  private val compressedLastRobEntryMaskSnapshots = SnapshotGenerator(enqCompressedLastCycleMask, snptEnq, io.snpt.snptDeq, io.redirect.valid, io.snpt.flushVec)
  private val compressedExtraWalkMask = compressedLastRobEntryMaskSnapshots(snptSelect)
  // b111111 --Cat(x,1)--> b1111111 --Reverse--> b1111111 --PriorityMux--> 6.U
  // b001111 --Cat(x,1)--> b0011111 --Reverse--> b1111100 --PriorityMux--> 4.U
  // b000011 --Cat(x,1)--> b0000111 --Reverse--> b1110000 --PriorityMux--> 2.U
  // b000000 --Cat(x,1)--> b0000001 --Reverse--> b1000000 --PriorityMux--> 0.U
  private val compressedExtraWalkSize = PriorityMux(Reverse(Cat(compressedExtraWalkMask, 1.U(1.W))), (0 to RenameWidth).map(i => (RenameWidth - i).U))

  // may shift [0, CommitWidth * MaxUopSize] steps
  val headPtrOHVec2 = VecInit(Seq.tabulate(CommitWidth * MaxUopSize + 1)(_ % size).map(step => deqPtrOHShift.left(step)))

  val vcfgPtrOH = RegInit(1.U(size.W))
  val vcfgPtrOHShift = CircularShift(vcfgPtrOH)
  // may shift [0, 2) steps
  val vcfgPtrOHVec = VecInit.tabulate(2)(vcfgPtrOHShift.left)

  val diffPtrOH = RegInit(1.U(size.W))
  val diffPtrOHShift = CircularShift(diffPtrOH)
  // may shift [0, CommitWidth * MaxUopSize] steps
  val diffPtrOHVec = VecInit(Seq.tabulate(CommitWidth * MaxUopSize + 1)(_ % size).map(step => diffPtrOHShift.left(step)))

  // Regs
  val renameBuffer = Mem(size, new RenameBufferEntry)
  val renameBufferEntries = (0 until size) map (i => renameBuffer(i))

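  // State machine:
  //   s_idle         - normal operation: commit the entries reported committable by the ROB
  //   s_special_walk - after a redirect that cannot use a snapshot, first drain the entries
  //                    the ROB had already marked committed, then start walking
  //   s_walk         - roll rename mappings back until both the ROB and the RAB report
  //                    that the walk has ended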
  val s_idle :: s_special_walk :: s_walk :: Nil = Enum(3)
  val state = RegInit(s_idle)
  val stateNext = WireInit(state) // otherwise keep state value

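  // walkEnd from the ROB is a single-cycle pulse; latch it until the next
  // redirect so the RAB can finish its own (possibly longer) walk afterwards.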
  private val robWalkEndReg = RegInit(false.B)
  private val robWalkEnd = io.fromRob.walkEnd || robWalkEndReg

  when(io.redirect.valid) {
    robWalkEndReg := false.B
  }.elsewhen(io.fromRob.walkEnd) {
    robWalkEndReg := true.B
  }

  val realNeedAlloc = io.req.map(req => req.valid && req.bits.needWriteRf)
  val enqCount    = PopCount(realNeedAlloc)
  val commitCount = Mux(io.commits.isCommit && !io.commits.isWalk, PopCount(io.commits.commitValid), 0.U)
  val walkCount   = Mux(io.commits.isWalk && !io.commits.isCommit, PopCount(io.commits.walkValid), 0.U)
  val specialWalkCount = Mux(io.commits.isCommit && io.commits.isWalk, PopCount(io.commits.walkValid), 0.U)

  // number of (ldest, pdest) pairs ready to commit to the arch RAT, to walk, and to special-walk
  val commitSize = RegInit(0.U(log2Up(size).W))
  val walkSize = RegInit(0.U(log2Up(size).W))
  val specialWalkSize = RegInit(0.U(log2Up(size).W))

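  // The ROB reports how many RAB entries become committable/walkable each cycle;
  // the counters accumulate those totals and are drained as the RAB actually
  // presents commit/walk entries below.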
  val newCommitSize = io.fromRob.commitSize
  val newWalkSize = io.fromRob.walkSize

  val commitSizeNxt = commitSize + newCommitSize - commitCount
  val walkSizeNxt = walkSize + newWalkSize - walkCount

  val newSpecialWalkSize = Mux(io.redirect.valid && !io.snpt.useSnpt, commitSizeNxt, 0.U)
  val specialWalkSizeNext = specialWalkSize + newSpecialWalkSize - specialWalkCount

  commitSize := Mux(io.redirect.valid && !io.snpt.useSnpt, 0.U, commitSizeNxt)
  specialWalkSize := specialWalkSizeNext
  walkSize := Mux(io.redirect.valid, Mux(io.snpt.useSnpt, compressedExtraWalkSize, 0.U), walkSizeNxt)

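  // Walk pointer source:
  //   entering s_walk from s_idle, or redirected to a snapshot while walking - restore from the selected snapshot
  //   entering s_walk from s_special_walk                                    - continue from the deq side
  //   while walking                                                          - advance by the entries walked this cycle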
  walkPtrNext := MuxCase(walkPtr, Seq(
    (state === s_idle && stateNext === s_walk) -> walkPtrSnapshots(snptSelect),
    (state === s_special_walk && stateNext === s_walk) -> deqPtrVecNext.head,
    (state === s_walk && io.snpt.useSnpt && io.redirect.valid) -> walkPtrSnapshots(snptSelect),
    (state === s_walk) -> (walkPtr + walkCount),
  ))

  walkPtr := walkPtrNext

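  // Read ports: each candidate vector reads consecutive buffer entries by
  // Mux1H-ing the shifted one-hot pointers onto the entry contents.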
  val walkCandidates   = VecInit(walkPtrOHVec.map(sel => Mux1H(sel, renameBufferEntries)))
  val commitCandidates = VecInit(deqPtrOHVec.map(sel => Mux1H(sel, renameBufferEntries)))
  val vcfgCandidates   = VecInit(vcfgPtrOHVec.map(sel => Mux1H(sel, renameBufferEntries)))
  val diffCandidates   = VecInit(diffPtrOHVec.map(sel => Mux1H(sel, renameBufferEntries)))

  // update diff pointer
  val diffPtrOHNext = Mux(state === s_idle, diffPtrOHVec(newCommitSize), diffPtrOH)
  diffPtrOH := diffPtrOHNext

  // update vcfg pointer
  vcfgPtrOH := diffPtrOHNext

  // update enq pointer
  val enqPtrNext = Mux(
    state === s_walk && stateNext === s_idle,
    walkPtrNext,
    enqPtr + enqCount
  )
  val enqPtrOHNext = Mux(
    state === s_walk && stateNext === s_idle,
    walkPtrNext.toOH,
    enqPtrOHVec(enqCount)
  )
  enqPtr := enqPtrNext
  enqPtrOH := enqPtrOHNext
  enqPtrVecNext.zipWithIndex.map{ case(ptr, i) => ptr := enqPtrNext + i.U }
  enqPtrVec := enqPtrVecNext

  val deqPtrSteps = Mux1H(Seq(
    (state === s_idle) -> commitCount,
    (state === s_special_walk) -> specialWalkCount,
  ))

  // update deq pointer
  val deqPtrNext = deqPtr + deqPtrSteps
  val deqPtrOHNext = deqPtrOHVec(deqPtrSteps)
  deqPtr := deqPtrNext
  deqPtrOH := deqPtrOHNext
  deqPtrVecNext.zipWithIndex.map{ case(ptr, i) => ptr := deqPtrNext + i.U }
  deqPtrVec := deqPtrVecNext

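  // Write ports: only requests that actually write a destination register
  // (needWriteRf) allocate an entry; the allocation index for slot i skips the
  // non-allocating requests before it.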
  val allocatePtrVec = VecInit((0 until RenameWidth).map(i => enqPtrVec(PopCount(realNeedAlloc.take(i))).value))
  allocatePtrVec.zip(io.req).zip(realNeedAlloc).map{ case((allocatePtr, req), realNeedAlloc) =>
    when(realNeedAlloc){
      renameBuffer(allocatePtr).ldest := req.bits.ldest
      renameBuffer(allocatePtr).pdest := req.bits.pdest
      renameBuffer(allocatePtr).rfWen := req.bits.rfWen
      renameBuffer(allocatePtr).fpWen := req.bits.fpWen
      renameBuffer(allocatePtr).vecWen := req.bits.vecWen
      renameBuffer(allocatePtr).isMove := req.bits.eliminatedMove
      renameBuffer(allocatePtr).robIdx := req.bits.robIdx
    }
  }

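  // Commit/walk outputs. s_idle commits, s_walk walks, and s_special_walk
  // asserts both commit and walk for the same entries, which are read through
  // the deq-side (commit) pointers.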
  io.commits.isCommit := state === s_idle || state === s_special_walk
  io.commits.isWalk := state === s_walk || state === s_special_walk

  for(i <- 0 until CommitWidth) {
    io.commits.commitValid(i) := state === s_idle && i.U < commitSize || state === s_special_walk && i.U < specialWalkSize
    io.commits.walkValid(i) := state === s_walk && i.U < walkSize || state === s_special_walk && i.U < specialWalkSize
    // special walk uses the commit (deq) pointers
    io.commits.info(i) := Mux(state === s_idle || state === s_special_walk, commitCandidates(i), walkCandidates(i))
    // Todo: remove this
    io.commits.robIdx(i) := Mux(state === s_idle || state === s_special_walk, commitCandidates(i).robIdx, walkCandidates(i).robIdx)
  }

  private val walkEndNext = walkSizeNxt === 0.U
  private val specialWalkEndNext = specialWalkSizeNext === 0.U

  // change state
  state := stateNext
  when(io.redirect.valid) {
    when(io.snpt.useSnpt) {
      stateNext := s_walk
    }.otherwise {
      stateNext := s_special_walk
    }
  }.otherwise {
    // change stateNext
    switch(state) {
      // this assignment has no effect (stateNext already defaults to state); listed only for completeness
      is(s_idle) {
        stateNext := s_idle
      }
      is(s_special_walk) {
        when(specialWalkEndNext) {
          stateNext := s_walk
        }
      }
      is(s_walk) {
        when(robWalkEnd && walkEndNext) {
          stateNext := s_idle
        }
      }
    }
  }

  val numValidEntries = distanceBetween(enqPtr, deqPtr)
  val allowEnqueue = RegNext(numValidEntries + enqCount <= (size - RenameWidth).U, true.B)

  io.canEnq := allowEnqueue && state === s_idle
  io.enqPtrVec := enqPtrVec

  io.status.walkEnd := walkEndNext

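  // Forward the physical destination of a committing write to the vconfig
  // register: take the first candidate if its ldest is VCONFIG and vecWen is
  // set, otherwise fall back to the second candidate.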
  io.vconfigPdest := Mux(vcfgCandidates(0).ldest === VCONFIG_IDX.U && vcfgCandidates(0).vecWen, vcfgCandidates(0).pdest, vcfgCandidates(1).pdest)

  // for difftest
  io.diffCommits := 0.U.asTypeOf(new DiffCommitIO)
  io.diffCommits.isCommit := state === s_idle || state === s_special_walk
  for(i <- 0 until CommitWidth * MaxUopSize) {
    io.diffCommits.commitValid(i) := (state === s_idle || state === s_special_walk) && i.U < newCommitSize
    io.diffCommits.info(i) := diffCandidates(i)
  }

  XSError(isBefore(enqPtr, deqPtr) && !isFull(enqPtr, deqPtr), "\ndeqPtr is older than enqPtr!\n")

  QueuePerf(RabSize, numValidEntries, numValidEntries === size.U)

  XSPerfAccumulate("s_idle_to_idle", state === s_idle         && stateNext === s_idle)
  XSPerfAccumulate("s_idle_to_swlk", state === s_idle         && stateNext === s_special_walk)
  XSPerfAccumulate("s_idle_to_walk", state === s_idle         && stateNext === s_walk)
  XSPerfAccumulate("s_swlk_to_idle", state === s_special_walk && stateNext === s_idle)
  XSPerfAccumulate("s_swlk_to_swlk", state === s_special_walk && stateNext === s_special_walk)
  XSPerfAccumulate("s_swlk_to_walk", state === s_special_walk && stateNext === s_walk)
  XSPerfAccumulate("s_walk_to_idle", state === s_walk         && stateNext === s_idle)
  XSPerfAccumulate("s_walk_to_swlk", state === s_walk         && stateNext === s_special_walk)
  XSPerfAccumulate("s_walk_to_walk", state === s_walk         && stateNext === s_walk)

  XSPerfAccumulate("disallow_enq_cycle", !allowEnqueue)
  XSPerfAccumulate("disallow_enq_full_cycle", numValidEntries + enqCount > (size - RenameWidth).U)
  XSPerfAccumulate("disallow_enq_not_idle_cycle", state =/= s_idle)
}