xref: /XiangShan/src/main/scala/xiangshan/backend/rename/Rename.scala (revision a3fe955f74cbccdabd470e29ff58d4077b066d22)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.backend.rename

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import utility._
import utils._
import xiangshan._
import xiangshan.backend.Bundles.{DecodedInst, DynInst}
import xiangshan.backend.decode.{FusionDecodeInfo, ImmUnion, Imm_I, Imm_LUI_LOAD, Imm_U}
import xiangshan.backend.fu.FuType
import xiangshan.backend.rename.freelist._
import xiangshan.backend.rob.{RobEnqIO, RobPtr}
import xiangshan.mem.mdp._
import xiangshan.ExceptionNO._
import xiangshan.backend.fu.FuType._
import xiangshan.mem.{EewLog2, GenUSWholeEmul}
import xiangshan.mem.GenRealFlowNum

class Rename(implicit p: Parameters) extends XSModule with HasCircularQueuePtrHelper with HasPerfEvents {

  // params alias
  private val numRegSrc = backendParams.numRegSrc
  private val numVecRegSrc = backendParams.numVecRegSrc
  private val numVecRatPorts = numVecRegSrc

  println(s"[Rename] numRegSrc: $numRegSrc")

  val io = IO(new Bundle() {
    val redirect = Flipped(ValidIO(new Redirect))
    val rabCommits = Input(new RabCommitIO)
    // from csr
    val singleStep = Input(Bool())
    // from decode
    val in = Vec(RenameWidth, Flipped(DecoupledIO(new DecodedInst)))
    val fusionInfo = Vec(DecodeWidth - 1, Flipped(new FusionDecodeInfo))
    // ssit read result
    val ssit = Flipped(Vec(RenameWidth, Output(new SSITEntry)))
    // waittable read result
    val waittable = Flipped(Vec(RenameWidth, Output(Bool())))
    // to rename table
    val intReadPorts = Vec(RenameWidth, Vec(2, Input(UInt(PhyRegIdxWidth.W))))
    val fpReadPorts = Vec(RenameWidth, Vec(3, Input(UInt(PhyRegIdxWidth.W))))
    val vecReadPorts = Vec(RenameWidth, Vec(numVecRatPorts, Input(UInt(PhyRegIdxWidth.W))))
    val v0ReadPorts = Vec(RenameWidth, Vec(1, Input(UInt(PhyRegIdxWidth.W))))
    val vlReadPorts = Vec(RenameWidth, Vec(1, Input(UInt(PhyRegIdxWidth.W))))
    val intRenamePorts = Vec(RenameWidth, Output(new RatWritePort(log2Ceil(IntLogicRegs))))
    val fpRenamePorts = Vec(RenameWidth, Output(new RatWritePort(log2Ceil(FpLogicRegs))))
    val vecRenamePorts = Vec(RenameWidth, Output(new RatWritePort(log2Ceil(VecLogicRegs))))
    val v0RenamePorts = Vec(RenameWidth, Output(new RatWritePort(log2Ceil(V0LogicRegs))))
    val vlRenamePorts = Vec(RenameWidth, Output(new RatWritePort(log2Ceil(VlLogicRegs))))
    // from rename table
    val int_old_pdest = Vec(RabCommitWidth, Input(UInt(PhyRegIdxWidth.W)))
    val fp_old_pdest = Vec(RabCommitWidth, Input(UInt(PhyRegIdxWidth.W)))
    val vec_old_pdest = Vec(RabCommitWidth, Input(UInt(PhyRegIdxWidth.W)))
    val v0_old_pdest = Vec(RabCommitWidth, Input(UInt(PhyRegIdxWidth.W)))
    val vl_old_pdest = Vec(RabCommitWidth, Input(UInt(PhyRegIdxWidth.W)))
    val int_need_free = Vec(RabCommitWidth, Input(Bool()))
    // to dispatch1
    val out = Vec(RenameWidth, DecoupledIO(new DynInst))
    // for snapshots
    val snpt = Input(new SnapshotPort)
    val snptLastEnq = Flipped(ValidIO(new RobPtr))
    val snptIsFull = Input(Bool())
    // debug arch ports
    val debug_int_rat = if (backendParams.debugEn) Some(Vec(32, Input(UInt(PhyRegIdxWidth.W)))) else None
    val debug_fp_rat = if (backendParams.debugEn) Some(Vec(32, Input(UInt(PhyRegIdxWidth.W)))) else None
    val debug_vec_rat = if (backendParams.debugEn) Some(Vec(31, Input(UInt(PhyRegIdxWidth.W)))) else None
    val debug_v0_rat = if (backendParams.debugEn) Some(Vec(1, Input(UInt(PhyRegIdxWidth.W)))) else None
    val debug_vl_rat = if (backendParams.debugEn) Some(Vec(1, Input(UInt(PhyRegIdxWidth.W)))) else None
    // perf only
    val stallReason = new Bundle {
      val in = Flipped(new StallReasonIO(RenameWidth))
      val out = new StallReasonIO(RenameWidth)
    }
  })

  // io alias
  private val dispatchCanAcc = io.out.head.ready

  val compressUnit = Module(new CompressUnit())
  // create free list and rat
  val intFreeList = Module(new MEFreeList(IntPhyRegs))
  val fpFreeList = Module(new StdFreeList(FpPhyRegs - FpLogicRegs, FpLogicRegs, Reg_F))
  val vecFreeList = Module(new StdFreeList(VfPhyRegs - VecLogicRegs, VecLogicRegs, Reg_V, 31))
  val v0FreeList = Module(new StdFreeList(V0PhyRegs - V0LogicRegs, V0LogicRegs, Reg_V0, 1))
  val vlFreeList = Module(new StdFreeList(VlPhyRegs - VlLogicRegs, VlLogicRegs, Reg_Vl, 1))
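  // Note: the int rename path uses MEFreeList, which appears to track physical-register
  // sharing for move elimination (eliminated moves allocate no new preg below), while the
  // fp/vec/v0/vl paths use the conventional StdFreeList.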


  intFreeList.io.commit    <> io.rabCommits
  intFreeList.io.debug_rat.foreach(_ <> io.debug_int_rat.get)
  fpFreeList.io.commit     <> io.rabCommits
  fpFreeList.io.debug_rat.foreach(_ <> io.debug_fp_rat.get)
  vecFreeList.io.commit    <> io.rabCommits
  vecFreeList.io.debug_rat.foreach(_ <> io.debug_vec_rat.get)
  v0FreeList.io.commit <> io.rabCommits
  v0FreeList.io.debug_rat.foreach(_ <> io.debug_v0_rat.get)
  vlFreeList.io.commit <> io.rabCommits
  vlFreeList.io.debug_rat.foreach(_ <> io.debug_vl_rat.get)

  // decide whether a given instruction needs a new physical register allocated (DecodedInst: from decode; RabCommitInfo: from rob)
  def needDestReg[T <: DecodedInst](reg_t: RegType, x: T): Bool = reg_t match {
    case Reg_I => x.rfWen && x.ldest =/= 0.U
    case Reg_F => x.fpWen
    case Reg_V => x.vecWen
    case Reg_V0 => x.v0Wen
    case Reg_Vl => x.vlWen
  }
  def needDestRegCommit[T <: RabCommitInfo](reg_t: RegType, x: T): Bool = {
    reg_t match {
      case Reg_I => x.rfWen
      case Reg_F => x.fpWen
      case Reg_V => x.vecWen
      case Reg_V0 => x.v0Wen
      case Reg_Vl => x.vlWen
    }
  }
  def needDestRegWalk[T <: RabCommitInfo](reg_t: RegType, x: T): Bool = {
    reg_t match {
      case Reg_I => x.rfWen && x.ldest =/= 0.U
      case Reg_F => x.fpWen
      case Reg_V => x.vecWen
      case Reg_V0 => x.v0Wen
      case Reg_Vl => x.vlWen
    }
  }

  // connect [redirect + walk] ports for all free lists (int, fp, vec, v0, vl)
  Seq(fpFreeList, vecFreeList, intFreeList, v0FreeList, vlFreeList).foreach { case fl =>
    fl.io.redirect := io.redirect.valid
    fl.io.walk := io.rabCommits.isWalk
  }
  // allocation can only proceed when every free list and dispatch1 have enough space
  // when isWalk, the free lists can always allocate
  intFreeList.io.doAllocate := fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && dispatchCanAcc || io.rabCommits.isWalk
  fpFreeList.io.doAllocate := intFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && dispatchCanAcc || io.rabCommits.isWalk
  vecFreeList.io.doAllocate := intFreeList.io.canAllocate && fpFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && dispatchCanAcc || io.rabCommits.isWalk
  v0FreeList.io.doAllocate := intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && vlFreeList.io.canAllocate && dispatchCanAcc || io.rabCommits.isWalk
  vlFreeList.io.doAllocate := intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && dispatchCanAcc || io.rabCommits.isWalk

  // canOut: dispatch1 ready && every free list (int, fp, vec, v0, vl) can allocate && not walking
  val canOut = dispatchCanAcc && fpFreeList.io.canAllocate && intFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !io.rabCommits.isWalk

  compressUnit.io.in.zip(io.in).foreach{ case(sink, source) =>
    sink.valid := source.valid && !io.singleStep
    sink.bits := source.bits
  }
  val needRobFlags = compressUnit.io.out.needRobFlags
  val instrSizesVec = compressUnit.io.out.instrSizes
  val compressMasksVec = compressUnit.io.out.masks
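  // Note: the CompressUnit appears to group uops so that several can share one ROB entry:
  // needRobFlags marks the uop that claims a new entry, instrSizes gives each group's size,
  // and masks selects the uops belonging to each group (interpretation based on their use below).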

  // speculatively assign a robIdx to each instruction
  val validCount = PopCount(io.in.zip(needRobFlags).map{ case(in, needRobFlag) => in.valid && in.bits.lastUop && needRobFlag}) // number of instructions waiting to enter rob (from decode)
  val robIdxHead = RegInit(0.U.asTypeOf(new RobPtr))
  val lastCycleMisprediction = GatedValidRegNext(io.redirect.valid && !io.redirect.bits.flushItself())
  val robIdxHeadNext = Mux(io.redirect.valid, io.redirect.bits.robIdx, // redirect: move ptr to given rob index
         Mux(lastCycleMisprediction, robIdxHead + 1.U, // mis-predict: the redirecting instruction itself is not flushed, so skip past it
           Mux(canOut, robIdxHead + validCount, // instructions successfully entered next stage: increase robIdx
                      /* default */  robIdxHead))) // no instructions passed by this cycle: stick to old value
  robIdxHead := robIdxHeadNext

  /**
    * Rename: allocate free physical register and update rename table
    */
  val uops = Wire(Vec(RenameWidth, new DynInst))
  uops.foreach( uop => {
    uop.srcState      := DontCare
    uop.debugInfo     := DontCare
    uop.lqIdx         := DontCare
    uop.sqIdx         := DontCare
    uop.waitForRobIdx := DontCare
    uop.singleStep    := DontCare
    uop.snapshot      := DontCare
    uop.srcLoadDependency := DontCare
    uop.numLsElem       :=  DontCare
    uop.hasException  :=  DontCare
    uop.useRegCache   := DontCare
    uop.regCacheIdx   := DontCare
  })
  private val fuType       = uops.map(_.fuType)
  private val fuOpType     = uops.map(_.fuOpType)
  private val vtype        = uops.map(_.vpu.vtype)
  private val sew          = vtype.map(_.vsew)
  private val lmul         = vtype.map(_.vlmul)
  private val eew          = uops.map(_.vpu.veew)
  private val mop          = fuOpType.map(fuOpTypeItem => LSUOpType.getVecLSMop(fuOpTypeItem))
  private val isVlsType    = fuType.map(fuTypeItem => isVls(fuTypeItem))
  private val isSegment    = fuType.map(fuTypeItem => isVsegls(fuTypeItem))
  private val isUnitStride = fuOpType.map(fuOpTypeItem => LSUOpType.isAllUS(fuOpTypeItem))
  private val nf           = fuOpType.zip(uops.map(_.vpu.nf)).map { case (fuOpTypeItem, nfItem) => Mux(LSUOpType.isWhole(fuOpTypeItem), 0.U, nfItem) }
  private val mulBits      = 3 // dirty code
  private val emul         = fuOpType.zipWithIndex.map { case (fuOpTypeItem, index) =>
    Mux(
      LSUOpType.isWhole(fuOpTypeItem),
      GenUSWholeEmul(nf(index)),
      Mux(
        LSUOpType.isMasked(fuOpTypeItem),
        0.U(mulBits.W),
        EewLog2(eew(index)) - sew(index) + lmul(index)
      )
    )
  }
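  // Note: the last branch computes EMUL in the log2 domain following the RVV rule
  // EMUL = (EEW / SEW) * LMUL, i.e. log2(EMUL) = log2(EEW) - log2(SEW) + log2(LMUL);
  // whole-register and mask accesses use fixed values instead.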
  private val isVecUnitType = isVlsType.zip(isUnitStride).map { case (isVlsTypeItem, isUnitStrideItem) =>
    isVlsTypeItem && isUnitStrideItem
  }
  private val instType = isSegment.zip(mop).map { case (isSegmentItem, mopItem) => Cat(isSegmentItem, mopItem) }
  // The exact number of flows for a unit-stride access cannot be calculated here:
  //  whether a unit-stride access needs to be split is only known once its address is available.
  // Scalar instructions are not handled here; their numLsElem is assigned later as appropriate.
  private val numLsElem = instType.zipWithIndex.map { case (instTypeItem, index) =>
    Mux(
      isVecUnitType(index),
      VecMemUnitStrideMaxFlowNum.U,
      GenRealFlowNum(instTypeItem, emul(index), lmul(index), eew(index), sew(index))
    )
  }
  uops.zipWithIndex.map { case(u, i) =>
    u.numLsElem := Mux(io.in(i).valid & isVlsType(i), numLsElem(i), 0.U)
  }

  val needVecDest    = Wire(Vec(RenameWidth, Bool()))
  val needFpDest     = Wire(Vec(RenameWidth, Bool()))
  val needIntDest    = Wire(Vec(RenameWidth, Bool()))
  val needV0Dest     = Wire(Vec(RenameWidth, Bool()))
  val needVlDest     = Wire(Vec(RenameWidth, Bool()))
  private val inHeadValid = io.in.head.valid

  val isMove = Wire(Vec(RenameWidth, Bool()))
  isMove zip io.in.map(_.bits) foreach {
    case (move, in) => move := Mux(in.exceptionVec.asUInt.orR, false.B, in.isMove)
  }

  val walkNeedIntDest = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))
  val walkNeedFpDest = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))
  val walkNeedVecDest = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))
  val walkNeedV0Dest = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))
  val walkNeedVlDest = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))
  val walkIsMove = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))

  val intSpecWen = Wire(Vec(RenameWidth, Bool()))
  val fpSpecWen  = Wire(Vec(RenameWidth, Bool()))
  val vecSpecWen = Wire(Vec(RenameWidth, Bool()))
  val v0SpecWen = Wire(Vec(RenameWidth, Bool()))
  val vlSpecWen = Wire(Vec(RenameWidth, Bool()))

  val walkIntSpecWen = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))

  val walkPdest = Wire(Vec(RenameWidth, UInt(PhyRegIdxWidth.W)))

  // uop calculation
  for (i <- 0 until RenameWidth) {
    (uops(i): Data).waiveAll :<= (io.in(i).bits: Data).waiveAll

    // update cf according to ssit result
    uops(i).storeSetHit := io.ssit(i).valid
    uops(i).loadWaitStrict := io.ssit(i).strict && io.ssit(i).valid
    uops(i).ssid := io.ssit(i).ssid

    // update cf according to waittable result
    uops(i).loadWaitBit := io.waittable(i)

    uops(i).replayInst := false.B // set by IQ or MemQ
    // alloc a new phy reg
    needV0Dest(i) := io.in(i).valid && needDestReg(Reg_V0, io.in(i).bits)
    needVlDest(i) := io.in(i).valid && needDestReg(Reg_Vl, io.in(i).bits)
    needVecDest(i) := io.in(i).valid && needDestReg(Reg_V, io.in(i).bits)
    needFpDest(i) := io.in(i).valid && needDestReg(Reg_F, io.in(i).bits)
    needIntDest(i) := io.in(i).valid && needDestReg(Reg_I, io.in(i).bits)
    if (i < RabCommitWidth) {
      walkNeedIntDest(i) := io.rabCommits.walkValid(i) && needDestRegWalk(Reg_I, io.rabCommits.info(i))
      walkNeedFpDest(i) := io.rabCommits.walkValid(i) && needDestRegWalk(Reg_F, io.rabCommits.info(i))
      walkNeedVecDest(i) := io.rabCommits.walkValid(i) && needDestRegWalk(Reg_V, io.rabCommits.info(i))
      walkNeedV0Dest(i) := io.rabCommits.walkValid(i) && needDestRegWalk(Reg_V0, io.rabCommits.info(i))
      walkNeedVlDest(i) := io.rabCommits.walkValid(i) && needDestRegWalk(Reg_Vl, io.rabCommits.info(i))
      walkIsMove(i) := io.rabCommits.info(i).isMove
    }
    fpFreeList.io.allocateReq(i) := needFpDest(i)
    fpFreeList.io.walkReq(i) := walkNeedFpDest(i)
    vecFreeList.io.allocateReq(i) := needVecDest(i)
    vecFreeList.io.walkReq(i) := walkNeedVecDest(i)
    v0FreeList.io.allocateReq(i) := needV0Dest(i)
    v0FreeList.io.walkReq(i) := walkNeedV0Dest(i)
    vlFreeList.io.allocateReq(i) := needVlDest(i)
    vlFreeList.io.walkReq(i) := walkNeedVlDest(i)
    intFreeList.io.allocateReq(i) := needIntDest(i) && !isMove(i)
    intFreeList.io.walkReq(i) := walkNeedIntDest(i) && !walkIsMove(i)
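    // Note: eliminated moves reuse the producer's physical register, so they request no int
    // allocation here (and, symmetrically, no walk allocation during recovery).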

    // no valid instruction from decode stage || all resources (dispatch1 + all free lists) ready
    io.in(i).ready := !io.in(0).valid || canOut
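    // Note: ready depends only on the head slot's valid and the shared canOut, so rename
    // accepts or stalls the whole decode group as a unit.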

    uops(i).robIdx := robIdxHead + PopCount(io.in.zip(needRobFlags).take(i).map{ case(in, needRobFlag) => in.valid && in.bits.lastUop && needRobFlag})
    uops(i).instrSize := instrSizesVec(i)
    val hasExceptionExceptFlushPipe = Cat(selectFrontend(uops(i).exceptionVec) :+ uops(i).exceptionVec(illegalInstr) :+ uops(i).exceptionVec(virtualInstr)).orR || uops(i).trigger.getFrontendCanFire
    when(isMove(i) || hasExceptionExceptFlushPipe) {
      uops(i).numUops := 0.U
      uops(i).numWB := 0.U
    }
    if (i > 0) {
      when(!needRobFlags(i - 1)) {
        uops(i).firstUop := false.B
        uops(i).ftqPtr := uops(i - 1).ftqPtr
        uops(i).ftqOffset := uops(i - 1).ftqOffset
        uops(i).numUops := instrSizesVec(i) - PopCount(compressMasksVec(i) & Cat(isMove.reverse))
        uops(i).numWB := instrSizesVec(i) - PopCount(compressMasksVec(i) & Cat(isMove.reverse))
      }
    }
    when(!needRobFlags(i)) {
      uops(i).lastUop := false.B
      uops(i).numUops := instrSizesVec(i) - PopCount(compressMasksVec(i) & Cat(isMove.reverse))
      uops(i).numWB := instrSizesVec(i) - PopCount(compressMasksVec(i) & Cat(isMove.reverse))
    }
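    // Note: within a compressed group, numUops/numWB count the group's uops minus the moves
    // in the same group, presumably because eliminated moves never execute or write back.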
    uops(i).wfflags := (compressMasksVec(i) & Cat(io.in.map(_.bits.wfflags).reverse)).orR
    uops(i).dirtyFs := (compressMasksVec(i) & Cat(io.in.map(_.bits.fpWen).reverse)).orR
    // vector instructions' uopSplitType cannot be UopSplitType.SCA_SIM
    uops(i).dirtyVs := (compressMasksVec(i) & Cat(io.in.map(_.bits.uopSplitType =/= UopSplitType.SCA_SIM).reverse)).orR
    // psrc(0)/psrc(1)/psrc(2) do not need v0ReadPorts: their srcType already tells whether the source is V0
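    // Note: the Mux1H selectors below use srcType as a one-hot choice among the int/fp/vec
    // RAT read ports (the third source has no int port); psrc(3) and psrc(4) are the implicit
    // v0 and vl sources read from their dedicated rename tables.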
    uops(i).psrc(0) := Mux1H(uops(i).srcType(0)(2, 0), Seq(io.intReadPorts(i)(0), io.fpReadPorts(i)(0), io.vecReadPorts(i)(0)))
    uops(i).psrc(1) := Mux1H(uops(i).srcType(1)(2, 0), Seq(io.intReadPorts(i)(1), io.fpReadPorts(i)(1), io.vecReadPorts(i)(1)))
    uops(i).psrc(2) := Mux1H(uops(i).srcType(2)(2, 1), Seq(io.fpReadPorts(i)(2), io.vecReadPorts(i)(2)))
    uops(i).psrc(3) := io.v0ReadPorts(i)(0)
    uops(i).psrc(4) := io.vlReadPorts(i)(0)

    // the second int source (psrc(1), i.e. rs2) is bypassed from the next instruction's read ports if the pair is fused
    if (i < RenameWidth - 1) {
      when (io.fusionInfo(i).rs2FromRs2 || io.fusionInfo(i).rs2FromRs1) {
        uops(i).psrc(1) := Mux(io.fusionInfo(i).rs2FromRs2, io.intReadPorts(i + 1)(1), io.intReadPorts(i + 1)(0))
      }.elsewhen(io.fusionInfo(i).rs2FromZero) {
        uops(i).psrc(1) := 0.U
      }
    }
    uops(i).eliminatedMove := isMove(i)

    // update pdest
    uops(i).pdest := MuxCase(0.U, Seq(
      needIntDest(i)    ->  intFreeList.io.allocatePhyReg(i),
      needFpDest(i)     ->  fpFreeList.io.allocatePhyReg(i),
      needVecDest(i)    ->  vecFreeList.io.allocatePhyReg(i),
      needV0Dest(i)     ->  v0FreeList.io.allocatePhyReg(i),
      needVlDest(i)     ->  vlFreeList.io.allocatePhyReg(i),
    ))
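    // Note: MuxCase gives priority to earlier entries (int > fp > vec > v0 > vl); presumably
    // a single uop asserts at most one of these needs, so the order should not matter here.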

    // Assign performance counters
    uops(i).debugInfo.renameTime := GTimer()

    io.out(i).valid := io.in(i).valid && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !io.rabCommits.isWalk
    io.out(i).bits := uops(i)
    // Todo: move this logic into the decode stage
    // dirty code for fence: the lsrc fields are packed into imm
    when (io.out(i).bits.fuType === FuType.fence.U) {
      io.out(i).bits.imm := Cat(io.in(i).bits.lsrc(1), io.in(i).bits.lsrc(0))
    }

    // dirty code for SoftPrefetch (prefetch.r/prefetch.w)
//    when (io.in(i).bits.isSoftPrefetch) {
//      io.out(i).bits.fuType := FuType.ldu.U
//      io.out(i).bits.fuOpType := Mux(io.in(i).bits.lsrc(1) === 1.U, LSUOpType.prefetch_r, LSUOpType.prefetch_w)
//      io.out(i).bits.selImm := SelImm.IMM_S
//      io.out(i).bits.imm := Cat(io.in(i).bits.imm(io.in(i).bits.imm.getWidth - 1, 5), 0.U(5.W))
//    }

    // dirty code for lui+addi(w) fusion
    if (i < RenameWidth - 1) {
      val fused_lui32 = io.in(i).bits.selImm === SelImm.IMM_LUI32 && io.in(i).bits.fuType === FuType.alu.U
      when (fused_lui32) {
        val lui_imm = io.in(i).bits.imm(19, 0)
        val add_imm = io.in(i + 1).bits.imm(11, 0)
        require(io.out(i).bits.imm.getWidth >= lui_imm.getWidth + add_imm.getWidth)
        io.out(i).bits.imm := Cat(lui_imm, add_imm)
      }
    }

    // write speculative rename table
    // we update rat later inside commit code
    intSpecWen(i) := needIntDest(i) && intFreeList.io.canAllocate && intFreeList.io.doAllocate && !io.rabCommits.isWalk && !io.redirect.valid
    fpSpecWen(i)  := needFpDest(i)  && fpFreeList.io.canAllocate  && fpFreeList.io.doAllocate  && !io.rabCommits.isWalk && !io.redirect.valid
    vecSpecWen(i) := needVecDest(i) && vecFreeList.io.canAllocate && vecFreeList.io.doAllocate && !io.rabCommits.isWalk && !io.redirect.valid
    v0SpecWen(i) := needV0Dest(i) && v0FreeList.io.canAllocate && v0FreeList.io.doAllocate && !io.rabCommits.isWalk && !io.redirect.valid
    vlSpecWen(i) := needVlDest(i) && vlFreeList.io.canAllocate && vlFreeList.io.doAllocate && !io.rabCommits.isWalk && !io.redirect.valid


    if (i < RabCommitWidth) {
      walkIntSpecWen(i) := walkNeedIntDest(i) && !io.redirect.valid
      walkPdest(i) := io.rabCommits.info(i).pdest
    } else {
      walkPdest(i) := io.out(i).bits.pdest
    }
  }

  /**
    * How to set psrc:
    * - bypass the pdest to psrc if previous instructions write to the same ldest as lsrc
    * - default: psrc from RAT
    * How to set pdest:
    * - Mux(isMove, psrc, pdest_from_freelist).
    *
    * The critical path of rename lies here:
    * When move elimination is enabled, we need to update the rat with psrc.
    * However, psrc may come from a previous instruction's pdest, which comes from the freelist.
    *
    * If we expand this logic for pdest(N):
    * pdest(N) = Mux(isMove(N), psrc(N), freelist_out(N))
    *          = Mux(isMove(N), Mux(bypass(N, N - 1), pdest(N - 1),
    *                           Mux(bypass(N, N - 2), pdest(N - 2),
    *                           ...
    *                           Mux(bypass(N, 0),     pdest(0),
    *                                                 rat_out(N))...)),
    *                           freelist_out(N))
    */
  // a simple functional model for now
  io.out(0).bits.pdest := Mux(isMove(0), uops(0).psrc.head, uops(0).pdest)

  // bypass conditions: one entry per source operand (numRegSrc) plus one for the destination
  val bypassCond: Vec[MixedVec[UInt]] = Wire(Vec(numRegSrc + 1, MixedVec(List.tabulate(RenameWidth-1)(i => UInt((i+1).W)))))
  require(io.in(0).bits.srcType.size == io.in(0).bits.numSrc)
  private val pdestLoc = io.in.head.bits.srcType.size // 2 vector src: v0, vl&vtype
  println(s"[Rename] idx of pdest in bypassCond $pdestLoc")
  for (i <- 1 until RenameWidth) {
    val v0Cond = io.in(i).bits.srcType.zipWithIndex.map{ case (s, i) =>
      if (i == 3) (s === SrcType.vp) || (s === SrcType.v0)
      else false.B
    } :+ needV0Dest(i)
    val vlCond = io.in(i).bits.srcType.zipWithIndex.map{ case (s, i) =>
      if (i == 4) s === SrcType.vp
      else false.B
    } :+ needVlDest(i)
    val vecCond = io.in(i).bits.srcType.map(_ === SrcType.vp) :+ needVecDest(i)
    val fpCond  = io.in(i).bits.srcType.map(_ === SrcType.fp) :+ needFpDest(i)
    val intCond = io.in(i).bits.srcType.map(_ === SrcType.xp) :+ needIntDest(i)
    val target = io.in(i).bits.lsrc :+ io.in(i).bits.ldest
    for ((((((cond1, (condV0, condVl)), cond2), cond3), t), j) <- vecCond.zip(v0Cond.zip(vlCond)).zip(fpCond).zip(intCond).zip(target).zipWithIndex) {
      val destToSrc = io.in.take(i).zipWithIndex.map { case (in, j) =>
        val indexMatch = in.bits.ldest === t
        val writeMatch =  cond3 && needIntDest(j) || cond2 && needFpDest(j) || cond1 && needVecDest(j)
        val v0vlMatch = condV0 && needV0Dest(j) || condVl && needVlDest(j)
        indexMatch && writeMatch || v0vlMatch
      }
      bypassCond(j)(i - 1) := VecInit(destToSrc).asUInt
    }
    io.out(i).bits.psrc(0) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(0)(i-1).asBools).foldLeft(uops(i).psrc(0)) {
      (z, next) => Mux(next._2, next._1, z)
    }
    io.out(i).bits.psrc(1) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(1)(i-1).asBools).foldLeft(uops(i).psrc(1)) {
      (z, next) => Mux(next._2, next._1, z)
    }
    io.out(i).bits.psrc(2) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(2)(i-1).asBools).foldLeft(uops(i).psrc(2)) {
      (z, next) => Mux(next._2, next._1, z)
    }
    io.out(i).bits.psrc(3) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(3)(i-1).asBools).foldLeft(uops(i).psrc(3)) {
      (z, next) => Mux(next._2, next._1, z)
    }
    io.out(i).bits.psrc(4) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(4)(i-1).asBools).foldLeft(uops(i).psrc(4)) {
      (z, next) => Mux(next._2, next._1, z)
    }
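    // Note: each foldLeft above starts from the RAT value and lets every matching older slot
    // override it in program order, so the youngest matching producer's pdest wins.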
    io.out(i).bits.pdest := Mux(isMove(i), io.out(i).bits.psrc(0), uops(i).pdest)

    // Todo: better implementation for fields reuse
    // For fused-lui-load, load.src(0) is replaced by the imm.
    val last_is_lui = io.in(i - 1).bits.selImm === SelImm.IMM_U && io.in(i - 1).bits.srcType(0) =/= SrcType.pc
    val this_is_load = io.in(i).bits.fuType === FuType.ldu.U
    val lui_to_load = io.in(i - 1).valid && io.in(i - 1).bits.ldest === io.in(i).bits.lsrc(0)
    val fused_lui_load = last_is_lui && this_is_load && lui_to_load
    when (fused_lui_load) {
      // The first LOAD operand (base address) is replaced by LUI-imm and stored in imm
      val lui_imm = io.in(i - 1).bits.imm(ImmUnion.U.len - 1, 0)
      val ld_imm = io.in(i).bits.imm(ImmUnion.I.len - 1, 0)
      require(io.out(i).bits.imm.getWidth >= lui_imm.getWidth + ld_imm.getWidth)
      io.out(i).bits.srcType(0) := SrcType.imm
      io.out(i).bits.imm := Cat(lui_imm, ld_imm)
    }

  }

  val genSnapshot = Cat(io.out.map(out => out.fire && out.bits.snapshot)).orR
  val lastCycleCreateSnpt = RegInit(false.B)
  lastCycleCreateSnpt := genSnapshot && !io.snptIsFull
  val sameSnptDistance = (RobCommitWidth * 4).U
  // notInSameSnpt: either the distance from robIdxHead to the last snapshot enqueue is at least sameSnptDistance, or no snapshot exists yet
  val notInSameSnpt = GatedValidRegNext(distanceBetween(robIdxHeadNext, io.snptLastEnq.bits) >= sameSnptDistance || !io.snptLastEnq.valid)
  val allowSnpt = if (EnableRenameSnapshot) notInSameSnpt && !lastCycleCreateSnpt && io.in.head.bits.firstUop else false.B
  io.out.zip(io.in).foreach{ case (out, in) => out.bits.snapshot := allowSnpt && (!in.bits.preDecodeInfo.notCFI || FuType.isJump(in.bits.fuType)) && in.fire }
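  // Note: a snapshot is only requested for CFIs or jump-type uops, when snapshots are enabled,
  // the previous cycle did not create one, and the group is far enough from the last snapshot,
  // so snapshot creation is rate-limited and tied to control flow.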
  io.out.map{ x =>
    x.bits.hasException := Cat(selectFrontend(x.bits.exceptionVec) :+ x.bits.exceptionVec(illegalInstr) :+ x.bits.exceptionVec(virtualInstr)).orR || x.bits.trigger.getFrontendCanFire
  }
  if(backendParams.debugEn){
    dontTouch(robIdxHeadNext)
    dontTouch(notInSameSnpt)
    dontTouch(genSnapshot)
  }
  intFreeList.io.snpt := io.snpt
  fpFreeList.io.snpt := io.snpt
  vecFreeList.io.snpt := io.snpt
  v0FreeList.io.snpt := io.snpt
  vlFreeList.io.snpt := io.snpt
  intFreeList.io.snpt.snptEnq := genSnapshot
  fpFreeList.io.snpt.snptEnq := genSnapshot
  vecFreeList.io.snpt.snptEnq := genSnapshot
  v0FreeList.io.snpt.snptEnq := genSnapshot
  vlFreeList.io.snpt.snptEnq := genSnapshot

  /**
    * Instructions commit: update freelist and rename table
    */
  for (i <- 0 until RabCommitWidth) {
    val commitValid = io.rabCommits.isCommit && io.rabCommits.commitValid(i)
    val walkValid = io.rabCommits.isWalk && io.rabCommits.walkValid(i)

    // I. RAT Update
    // When redirect happens (mis-prediction), don't update the rename table
    io.intRenamePorts(i).wen  := intSpecWen(i)
    io.intRenamePorts(i).addr := uops(i).ldest(log2Ceil(IntLogicRegs) - 1, 0)
    io.intRenamePorts(i).data := io.out(i).bits.pdest

    io.fpRenamePorts(i).wen  := fpSpecWen(i)
    io.fpRenamePorts(i).addr := uops(i).ldest(log2Ceil(FpLogicRegs) - 1, 0)
    io.fpRenamePorts(i).data := fpFreeList.io.allocatePhyReg(i)

    io.vecRenamePorts(i).wen := vecSpecWen(i)
    io.vecRenamePorts(i).addr := uops(i).ldest(log2Ceil(VecLogicRegs) - 1, 0)
    io.vecRenamePorts(i).data := vecFreeList.io.allocatePhyReg(i)

    io.v0RenamePorts(i).wen := v0SpecWen(i)
    io.v0RenamePorts(i).addr := uops(i).ldest(log2Ceil(V0LogicRegs) - 1, 0)
    io.v0RenamePorts(i).data := v0FreeList.io.allocatePhyReg(i)

    io.vlRenamePorts(i).wen := vlSpecWen(i)
    io.vlRenamePorts(i).addr := uops(i).ldest(log2Ceil(VlLogicRegs) - 1, 0)
    io.vlRenamePorts(i).data := vlFreeList.io.allocatePhyReg(i)

    // II. Free List Update
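    // Note: the int free list is driven by int_need_free / int_old_pdest, presumably because
    // move elimination lets several mappings share one int physical register, so a preg is
    // only returned when its last mapping dies; the other free lists simply free the old
    // pdest of each committed destination write.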
    intFreeList.io.freeReq(i) := io.int_need_free(i)
    intFreeList.io.freePhyReg(i) := RegNext(io.int_old_pdest(i))
    fpFreeList.io.freeReq(i)  := GatedValidRegNext(commitValid && needDestRegCommit(Reg_F, io.rabCommits.info(i)))
    fpFreeList.io.freePhyReg(i) := io.fp_old_pdest(i)
    vecFreeList.io.freeReq(i)  := GatedValidRegNext(commitValid && needDestRegCommit(Reg_V, io.rabCommits.info(i)))
    vecFreeList.io.freePhyReg(i) := io.vec_old_pdest(i)
    v0FreeList.io.freeReq(i) := GatedValidRegNext(commitValid && needDestRegCommit(Reg_V0, io.rabCommits.info(i)))
    v0FreeList.io.freePhyReg(i) := io.v0_old_pdest(i)
    vlFreeList.io.freeReq(i) := GatedValidRegNext(commitValid && needDestRegCommit(Reg_Vl, io.rabCommits.info(i)))
    vlFreeList.io.freePhyReg(i) := io.vl_old_pdest(i)
  }

  /*
  Debug and performance counters
   */
  def printRenameInfo(in: DecoupledIO[DecodedInst], out: DecoupledIO[DynInst]) = {
    XSInfo(out.fire, p"pc:${Hexadecimal(in.bits.pc)} in(${in.valid},${in.ready}) " +
      p"lsrc(0):${in.bits.lsrc(0)} -> psrc(0):${out.bits.psrc(0)} " +
      p"lsrc(1):${in.bits.lsrc(1)} -> psrc(1):${out.bits.psrc(1)} " +
      p"lsrc(2):${in.bits.lsrc(2)} -> psrc(2):${out.bits.psrc(2)} " +
      p"ldest:${in.bits.ldest} -> pdest:${out.bits.pdest}\n"
    )
  }

  for ((x,y) <- io.in.zip(io.out)) {
    printRenameInfo(x, y)
  }

  io.out.map { case x =>
    when(x.valid && x.bits.rfWen){
      assert(x.bits.ldest =/= 0.U, "rfWen cannot be 1 when Int regfile ldest is 0")
    }
  }
  val debugRedirect = RegEnable(io.redirect.bits, io.redirect.valid)
  // bad speculation
  val recStall = io.redirect.valid || io.rabCommits.isWalk
  val ctrlRecStall = Mux(io.redirect.valid, io.redirect.bits.debugIsCtrl, io.rabCommits.isWalk && debugRedirect.debugIsCtrl)
  val mvioRecStall = Mux(io.redirect.valid, io.redirect.bits.debugIsMemVio, io.rabCommits.isWalk && debugRedirect.debugIsMemVio)
  val otherRecStall = recStall && !(ctrlRecStall || mvioRecStall)
  XSPerfAccumulate("recovery_stall", recStall)
  XSPerfAccumulate("control_recovery_stall", ctrlRecStall)
  XSPerfAccumulate("mem_violation_recovery_stall", mvioRecStall)
  XSPerfAccumulate("other_recovery_stall", otherRecStall)
  // freelist stall
  val notRecStall = !io.out.head.valid && !recStall
  val intFlStall = notRecStall && inHeadValid && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !intFreeList.io.canAllocate
  val fpFlStall = notRecStall && inHeadValid && intFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !fpFreeList.io.canAllocate
  val vecFlStall = notRecStall && inHeadValid && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !vecFreeList.io.canAllocate
  val v0FlStall = notRecStall && inHeadValid && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && vlFreeList.io.canAllocate && !v0FreeList.io.canAllocate
  val vlFlStall = notRecStall && inHeadValid && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && !vlFreeList.io.canAllocate
  val multiFlStall = notRecStall && inHeadValid && (PopCount(Cat(
    !intFreeList.io.canAllocate,
    !fpFreeList.io.canAllocate,
    !vecFreeList.io.canAllocate,
    !v0FreeList.io.canAllocate,
    !vlFreeList.io.canAllocate,
  )) > 1.U)
  // other stall
  val otherStall = notRecStall && !intFlStall && !fpFlStall && !vecFlStall && !v0FlStall && !vlFlStall && !multiFlStall

  io.stallReason.in.backReason.valid := io.stallReason.out.backReason.valid || !io.in.head.ready
  io.stallReason.in.backReason.bits := Mux(io.stallReason.out.backReason.valid, io.stallReason.out.backReason.bits,
    MuxCase(TopDownCounters.OtherCoreStall.id.U, Seq(
      ctrlRecStall  -> TopDownCounters.ControlRecoveryStall.id.U,
      mvioRecStall  -> TopDownCounters.MemVioRecoveryStall.id.U,
      otherRecStall -> TopDownCounters.OtherRecoveryStall.id.U,
      intFlStall    -> TopDownCounters.IntFlStall.id.U,
      fpFlStall     -> TopDownCounters.FpFlStall.id.U,
      vecFlStall    -> TopDownCounters.VecFlStall.id.U,
      v0FlStall     -> TopDownCounters.V0FlStall.id.U,
      vlFlStall     -> TopDownCounters.VlFlStall.id.U,
      multiFlStall  -> TopDownCounters.MultiFlStall.id.U,
    )
  ))
  io.stallReason.out.reason.zip(io.stallReason.in.reason).zip(io.in.map(_.valid)).foreach { case ((out, in), valid) =>
    out := Mux(io.stallReason.in.backReason.valid, io.stallReason.in.backReason.bits, in)
  }

  XSDebug(io.rabCommits.isWalk, p"Walk Recovery Enabled\n")
  XSDebug(io.rabCommits.isWalk, p"validVec:${Binary(io.rabCommits.walkValid.asUInt)}\n")
  for (i <- 0 until RabCommitWidth) {
    val info = io.rabCommits.info(i)
    XSDebug(io.rabCommits.isWalk && io.rabCommits.walkValid(i), p"[#$i walk info] " +
      p"ldest:${info.ldest} rfWen:${info.rfWen} fpWen:${info.fpWen} vecWen:${info.vecWen} v0Wen:${info.v0Wen} vlWen:${info.vlWen}")
  }

  XSDebug(p"inValidVec: ${Binary(Cat(io.in.map(_.valid)))}\n")

  XSPerfAccumulate("in_valid_count", PopCount(io.in.map(_.valid)))
  XSPerfAccumulate("in_fire_count", PopCount(io.in.map(_.fire)))
  XSPerfAccumulate("in_valid_not_ready_count", PopCount(io.in.map(x => x.valid && !x.ready)))
  XSPerfAccumulate("wait_cycle", !io.in.head.valid && dispatchCanAcc)

  // These stall reasons can overlap; the priority is configured as follows:
  // walk stall > dispatch stall > int freelist stall > fp freelist stall
  private val inHeadStall = io.in.head match { case x => x.valid && !x.ready }
  private val stallForWalk      = inHeadValid &&  io.rabCommits.isWalk
  private val stallForDispatch  = inHeadValid && !io.rabCommits.isWalk && !dispatchCanAcc
  private val stallForIntFL     = inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !intFreeList.io.canAllocate
  private val stallForFpFL      = inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !fpFreeList.io.canAllocate
  private val stallForVecFL     = inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !vecFreeList.io.canAllocate
  private val stallForV0FL      = inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && vlFreeList.io.canAllocate && !v0FreeList.io.canAllocate
  private val stallForVlFL      = inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && !vlFreeList.io.canAllocate
  XSPerfAccumulate("stall_cycle",          inHeadStall)
  XSPerfAccumulate("stall_cycle_walk",     stallForWalk)
  XSPerfAccumulate("stall_cycle_dispatch", stallForDispatch)
  XSPerfAccumulate("stall_cycle_int",      stallForIntFL)
  XSPerfAccumulate("stall_cycle_fp",       stallForFpFL)
  XSPerfAccumulate("stall_cycle_vec",      stallForVecFL)
  XSPerfAccumulate("stall_cycle_v0",       stallForV0FL)
  XSPerfAccumulate("stall_cycle_vl",       stallForVlFL)

  XSPerfHistogram("in_valid_range",  PopCount(io.in.map(_.valid)),  true.B, 0, DecodeWidth + 1, 1)
  XSPerfHistogram("in_fire_range",   PopCount(io.in.map(_.fire)),   true.B, 0, DecodeWidth + 1, 1)
  XSPerfHistogram("out_valid_range", PopCount(io.out.map(_.valid)), true.B, 0, DecodeWidth + 1, 1)
  XSPerfHistogram("out_fire_range",  PopCount(io.out.map(_.fire)),  true.B, 0, DecodeWidth + 1, 1)

  XSPerfAccumulate("move_instr_count", PopCount(io.out.map(out => out.fire && out.bits.isMove)))
  val is_fused_lui_load = io.out.map(o => o.fire && o.bits.fuType === FuType.ldu.U && o.bits.srcType(0) === SrcType.imm)
  XSPerfAccumulate("fused_lui_load_instr_count", PopCount(is_fused_lui_load))

  val renamePerf = Seq(
    ("rename_in                  ", PopCount(io.in.map(_.valid & io.in(0).ready ))                                                               ),
    ("rename_waitinstr           ", PopCount((0 until RenameWidth).map(i => io.in(i).valid && !io.in(i).ready))                                  ),
    ("rename_stall               ", inHeadStall),
    ("rename_stall_cycle_walk    ", inHeadValid &&  io.rabCommits.isWalk),
    ("rename_stall_cycle_dispatch", inHeadValid && !io.rabCommits.isWalk && !dispatchCanAcc),
    ("rename_stall_cycle_int     ", inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !intFreeList.io.canAllocate),
    ("rename_stall_cycle_fp      ", inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !fpFreeList.io.canAllocate),
    ("rename_stall_cycle_vec     ", inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !vecFreeList.io.canAllocate),
    ("rename_stall_cycle_v0      ", inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && vlFreeList.io.canAllocate && !v0FreeList.io.canAllocate),
    ("rename_stall_cycle_vl      ", inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && !vlFreeList.io.canAllocate),
  )
  val intFlPerf = intFreeList.getPerfEvents
  val fpFlPerf = fpFreeList.getPerfEvents
  val vecFlPerf = vecFreeList.getPerfEvents
  val v0FlPerf = v0FreeList.getPerfEvents
  val vlFlPerf = vlFreeList.getPerfEvents
  val perfEvents = renamePerf ++ intFlPerf ++ fpFlPerf ++ vecFlPerf ++ v0FlPerf ++ vlFlPerf
  generatePerfEvent()
}