/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.backend.rename

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import utility._
import utils._
import xiangshan._
import xiangshan.backend.Bundles.{DecodedInst, DynInst}
import xiangshan.backend.decode.{FusionDecodeInfo, ImmUnion, Imm_I, Imm_LUI_LOAD, Imm_U}
import xiangshan.backend.fu.FuType
import xiangshan.backend.rename.freelist._
import xiangshan.backend.rob.{RobEnqIO, RobPtr}
import xiangshan.mem.mdp._
import xiangshan.ExceptionNO._
import xiangshan.backend.fu.FuType._
import xiangshan.mem.{EewLog2, GenUSWholeEmul}
import xiangshan.mem.GenRealFlowNum
import xiangshan.backend.trace._
import xiangshan.backend.decode.isa.bitfield.{OPCODE5Bit, XSInstBitFields}
import xiangshan.backend.fu.util.CSRConst

class Rename(implicit p: Parameters) extends XSModule with HasCircularQueuePtrHelper with HasPerfEvents {

  // params alias
  private val numRegSrc = backendParams.numRegSrc
  private val numVecRegSrc = backendParams.numVecRegSrc
  private val numVecRatPorts = numVecRegSrc

  println(s"[Rename] numRegSrc: $numRegSrc")

  val io = IO(new Bundle() {
    val redirect = Flipped(ValidIO(new Redirect))
    val rabCommits = Input(new RabCommitIO)
    // from csr
    val singleStep = Input(Bool())
    // from decode
    val in = Vec(RenameWidth, Flipped(DecoupledIO(new DecodedInst)))
    val fusionInfo = Vec(DecodeWidth - 1, Flipped(new FusionDecodeInfo))
    // ssit read result
    val ssit = Flipped(Vec(RenameWidth, Output(new SSITEntry)))
    // waittable read result
    val waittable = Flipped(Vec(RenameWidth, Output(Bool())))
    // to rename table
    val intReadPorts = Vec(RenameWidth, Vec(2, Input(UInt(PhyRegIdxWidth.W))))
    val fpReadPorts = Vec(RenameWidth, Vec(3, Input(UInt(PhyRegIdxWidth.W))))
    val vecReadPorts = Vec(RenameWidth, Vec(numVecRatPorts, Input(UInt(PhyRegIdxWidth.W))))
    val v0ReadPorts = Vec(RenameWidth, Vec(1, Input(UInt(PhyRegIdxWidth.W))))
    val vlReadPorts = Vec(RenameWidth, Vec(1, Input(UInt(PhyRegIdxWidth.W))))
    val intRenamePorts = Vec(RenameWidth, Output(new RatWritePort(log2Ceil(IntLogicRegs))))
    val fpRenamePorts = Vec(RenameWidth, Output(new RatWritePort(log2Ceil(FpLogicRegs))))
    val vecRenamePorts = Vec(RenameWidth, Output(new RatWritePort(log2Ceil(VecLogicRegs))))
    val v0RenamePorts = Vec(RenameWidth, Output(new RatWritePort(log2Ceil(V0LogicRegs))))
    val vlRenamePorts = Vec(RenameWidth, Output(new RatWritePort(log2Ceil(VlLogicRegs))))
    // from rename table
    val int_old_pdest = Vec(RabCommitWidth, Input(UInt(PhyRegIdxWidth.W)))
    val fp_old_pdest = Vec(RabCommitWidth, Input(UInt(PhyRegIdxWidth.W)))
    val vec_old_pdest = Vec(RabCommitWidth, Input(UInt(PhyRegIdxWidth.W)))
    val v0_old_pdest = Vec(RabCommitWidth, Input(UInt(PhyRegIdxWidth.W)))
    val vl_old_pdest = Vec(RabCommitWidth, Input(UInt(PhyRegIdxWidth.W)))
    val int_need_free = Vec(RabCommitWidth, Input(Bool()))
    // to dispatch1
    val out = Vec(RenameWidth, DecoupledIO(new DynInst))
    // for snapshots
    val snpt = Input(new SnapshotPort)
    val snptLastEnq = Flipped(ValidIO(new RobPtr))
    val snptIsFull = Input(Bool())
    // debug arch ports
    val debug_int_rat = if (backendParams.debugEn) Some(Vec(32, Input(UInt(PhyRegIdxWidth.W)))) else None
    val debug_fp_rat = if (backendParams.debugEn) Some(Vec(32, Input(UInt(PhyRegIdxWidth.W)))) else None
    val debug_vec_rat = if (backendParams.debugEn) Some(Vec(31, Input(UInt(PhyRegIdxWidth.W)))) else None
    val debug_v0_rat = if (backendParams.debugEn) Some(Vec(1, Input(UInt(PhyRegIdxWidth.W)))) else None
    val debug_vl_rat = if (backendParams.debugEn) Some(Vec(1, Input(UInt(PhyRegIdxWidth.W)))) else None
    // perf only
    val stallReason = new Bundle {
      val in = Flipped(new StallReasonIO(RenameWidth))
      val out = new StallReasonIO(RenameWidth)
    }
  })

  // io alias
  private val dispatchCanAcc = io.out.head.ready

  val compressUnit = Module(new CompressUnit())
  // create free list and rat
  val intFreeList = Module(new MEFreeList(IntPhyRegs))
  val fpFreeList = Module(new StdFreeList(FpPhyRegs - FpLogicRegs, FpLogicRegs, Reg_F))
  val vecFreeList = Module(new StdFreeList(VfPhyRegs - VecLogicRegs, VecLogicRegs, Reg_V, 31))
  val v0FreeList = Module(new StdFreeList(V0PhyRegs - V0LogicRegs, V0LogicRegs, Reg_V0, 1))
  val vlFreeList = Module(new StdFreeList(VlPhyRegs - VlLogicRegs, VlLogicRegs, Reg_Vl, 1))

  intFreeList.io.commit <> io.rabCommits
  intFreeList.io.debug_rat.foreach(_ <> io.debug_int_rat.get)
  fpFreeList.io.commit <> io.rabCommits
  fpFreeList.io.debug_rat.foreach(_ <> io.debug_fp_rat.get)
  vecFreeList.io.commit <> io.rabCommits
  vecFreeList.io.debug_rat.foreach(_ <> io.debug_vec_rat.get)
  v0FreeList.io.commit <> io.rabCommits
  v0FreeList.io.debug_rat.foreach(_ <> io.debug_v0_rat.get)
  vlFreeList.io.commit <> io.rabCommits
  vlFreeList.io.debug_rat.foreach(_ <> io.debug_vl_rat.get)

  // decide if a given instruction needs to allocate a new physical register (DecodedInst: from decode; RabCommitInfo: from rob)
  def needDestReg[T <: DecodedInst](reg_t: RegType, x: T): Bool = reg_t match {
    case Reg_I => x.rfWen && x.ldest =/= 0.U
    case Reg_F => x.fpWen
    case Reg_V => x.vecWen
    case Reg_V0 => x.v0Wen
    case Reg_Vl => x.vlWen
  }
  def needDestRegCommit[T <: RabCommitInfo](reg_t: RegType, x: T): Bool = {
    reg_t match {
      case Reg_I => x.rfWen
      case Reg_F => x.fpWen
      case Reg_V => x.vecWen
      case Reg_V0 => x.v0Wen
      case Reg_Vl => x.vlWen
    }
  }
  def needDestRegWalk[T <: RabCommitInfo](reg_t: RegType, x: T): Bool = {
    reg_t match {
      case Reg_I => x.rfWen && x.ldest =/= 0.U
      case Reg_F => x.fpWen
      case Reg_V => x.vecWen
      case Reg_V0 => x.v0Wen
      case Reg_Vl => x.vlWen
    }
  }

  // connect [redirect + walk] ports for the int & fp & vec & v0 & vl free lists
  Seq(fpFreeList, vecFreeList, intFreeList, v0FreeList, vlFreeList).foreach { case fl =>
    fl.io.redirect := io.redirect.valid
    fl.io.walk := io.rabCommits.isWalk
  }
  // only when all free lists and dispatch1 have enough space can we do allocation
  // when isWalk, the free lists can definitely allocate
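  // Each list's doAllocate depends on every other list's canAllocate plus dispatch
  // readiness, so allocation is all-or-nothing across the five register classes:
  // if any single free list is exhausted, none of them pops an entry and the whole
  // rename group stalls. Schematically (illustrative, not extra hardware):
  //   doAllocate(X) := AND(canAllocate(Y) for all Y =/= X) && dispatchCanAcc || isWalk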
  intFreeList.io.doAllocate := fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && dispatchCanAcc || io.rabCommits.isWalk
  fpFreeList.io.doAllocate := intFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && dispatchCanAcc || io.rabCommits.isWalk
  vecFreeList.io.doAllocate := intFreeList.io.canAllocate && fpFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && dispatchCanAcc || io.rabCommits.isWalk
  v0FreeList.io.doAllocate := intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && vlFreeList.io.canAllocate && dispatchCanAcc || io.rabCommits.isWalk
  vlFreeList.io.doAllocate := intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && dispatchCanAcc || io.rabCommits.isWalk

  // dispatch1 ready && all free lists (int/fp/vec/v0/vl) ready && not walking
  val canOut = dispatchCanAcc && fpFreeList.io.canAllocate && intFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !io.rabCommits.isWalk

  compressUnit.io.in.zip(io.in).foreach { case (sink, source) =>
    sink.valid := source.valid && !io.singleStep
    sink.bits := source.bits
  }
  val needRobFlags = compressUnit.io.out.needRobFlags
  val instrSizesVec = compressUnit.io.out.instrSizes
  val compressMasksVec = compressUnit.io.out.masks

  // speculatively assign a robIdx to each instruction
  val validCount = PopCount(io.in.zip(needRobFlags).map { case (in, needRobFlag) => in.valid && in.bits.lastUop && needRobFlag }) // number of instructions waiting to enter rob (from decode)
  val robIdxHead = RegInit(0.U.asTypeOf(new RobPtr))
  val lastCycleMisprediction = GatedValidRegNext(io.redirect.valid && !io.redirect.bits.flushItself())
  val robIdxHeadNext = Mux(io.redirect.valid, io.redirect.bits.robIdx, // redirect: move ptr to given rob index
    Mux(lastCycleMisprediction, robIdxHead + 1.U, // mis-predict: do not flush robIdx itself
      Mux(canOut, robIdxHead + validCount, // instructions successfully entered next stage: increase robIdx
        /* default */ robIdxHead))) // no instructions passed by this cycle: stick to old value
  robIdxHead := robIdxHeadNext

  /**
   * Rename: allocate free physical register and update rename table
   */
  val uops = Wire(Vec(RenameWidth, new DynInst))
  uops.foreach( uop => {
    uop.srcState := DontCare
    uop.debugInfo := DontCare
    uop.lqIdx := DontCare
    uop.sqIdx := DontCare
    uop.waitForRobIdx := DontCare
    uop.singleStep := DontCare
    uop.snapshot := DontCare
    uop.srcLoadDependency := DontCare
    uop.numLsElem := DontCare
    uop.hasException := DontCare
    uop.useRegCache := DontCare
    uop.regCacheIdx := DontCare
    uop.traceBlockInPipe := DontCare
  })
  private val inst = Wire(Vec(RenameWidth, new XSInstBitFields))
  private val isCsr = Wire(Vec(RenameWidth, Bool()))
  private val isCsrr = Wire(Vec(RenameWidth, Bool()))
  private val isRoCsrr = Wire(Vec(RenameWidth, Bool()))
  private val fuType = uops.map(_.fuType)
  private val fuOpType = uops.map(_.fuOpType)
  private val vtype = uops.map(_.vpu.vtype)
  private val sew = vtype.map(_.vsew)
  private val lmul = vtype.map(_.vlmul)
  private val eew = uops.map(_.vpu.veew)
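  // The vector load/store shape computed below follows the RVV relation
  // EMUL = (EEW / SEW) * LMUL, evaluated in the log2 domain as
  // EewLog2(eew) - sew + lmul, with whole-register and mask accesses
  // special-cased. Illustrative example: an access with EEW=16, SEW=32,
  // LMUL=2 yields EMUL = (16/32)*2 = 1.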
  private val mop = fuOpType.map(fuOpTypeItem => LSUOpType.getVecLSMop(fuOpTypeItem))
  private val isVlsType = fuType.map(fuTypeItem => isVls(fuTypeItem))
  private val isSegment = fuType.map(fuTypeItem => isVsegls(fuTypeItem))
  private val isUnitStride = fuOpType.map(fuOpTypeItem => LSUOpType.isAllUS(fuOpTypeItem))
  private val nf = fuOpType.zip(uops.map(_.vpu.nf)).map { case (fuOpTypeItem, nfItem) => Mux(LSUOpType.isWhole(fuOpTypeItem), 0.U, nfItem) }
  private val mulBits = 3 // dirty code
  private val emul = fuOpType.zipWithIndex.map { case (fuOpTypeItem, index) =>
    Mux(
      LSUOpType.isWhole(fuOpTypeItem),
      GenUSWholeEmul(nf(index)),
      Mux(
        LSUOpType.isMasked(fuOpTypeItem),
        0.U(mulBits.W),
        EewLog2(eew(index)) - sew(index) + lmul(index)
      )
    )
  }
  private val isVecUnitType = isVlsType.zip(isUnitStride).map { case (isVlsTypeItem, isUnitStrideItem) =>
    isVlsTypeItem && isUnitStrideItem
  }
  private val instType = isSegment.zip(mop).map { case (isSegmentItem, mopItem) => Cat(isSegmentItem, mopItem) }
  // There is no way to calculate the 'flow' for 'unit-stride' exactly:
  // whether a 'unit-stride' access needs to be split can only be known after the address is obtained.
  // For scalar instructions, this is not handled here; different assignments are done later according to the situation.
  private val numLsElem = instType.zipWithIndex.map { case (instTypeItem, index) =>
    Mux(
      isVecUnitType(index),
      VecMemUnitStrideMaxFlowNum.U,
      GenRealFlowNum(instTypeItem, emul(index), lmul(index), eew(index), sew(index))
    )
  }
  uops.zipWithIndex.foreach { case (u, i) =>
    u.numLsElem := Mux(io.in(i).valid & isVlsType(i), numLsElem(i), 0.U)
  }

  val needVecDest = Wire(Vec(RenameWidth, Bool()))
  val needFpDest = Wire(Vec(RenameWidth, Bool()))
  val needIntDest = Wire(Vec(RenameWidth, Bool()))
  val needV0Dest = Wire(Vec(RenameWidth, Bool()))
  val needVlDest = Wire(Vec(RenameWidth, Bool()))
  private val inHeadValid = io.in.head.valid

  val isMove = Wire(Vec(RenameWidth, Bool()))
  isMove zip io.in.map(_.bits) foreach {
    case (move, in) => move := Mux(in.exceptionVec.asUInt.orR, false.B, in.isMove)
  }

  val walkNeedIntDest = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))
  val walkNeedFpDest = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))
  val walkNeedVecDest = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))
  val walkNeedV0Dest = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))
  val walkNeedVlDest = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))
  val walkIsMove = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))

  val intSpecWen = Wire(Vec(RenameWidth, Bool()))
  val fpSpecWen = Wire(Vec(RenameWidth, Bool()))
  val vecSpecWen = Wire(Vec(RenameWidth, Bool()))
  val v0SpecWen = Wire(Vec(RenameWidth, Bool()))
  val vlSpecWen = Wire(Vec(RenameWidth, Bool()))

  val walkIntSpecWen = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))

  val walkPdest = Wire(Vec(RenameWidth, UInt(PhyRegIdxWidth.W)))

  // uop calculation
  for (i <- 0 until RenameWidth) {
    (uops(i): Data).waiveAll :<= (io.in(i).bits: Data).waiveAll

    // read-only CSRR instruction support: remove blockBackward and waitForward
    inst(i) := uops(i).instr.asTypeOf(new XSInstBitFields)
    isCsr(i) := inst(i).OPCODE5Bit === OPCODE5Bit.SYSTEM && inst(i).FUNCT3(1, 0) =/= 0.U
    isCsrr(i) := isCsr(i) && inst(i).FUNCT3 === BitPat("b?1?") && inst(i).RS1 === 0.U
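    // A csrr that reads a read-only CSR has no architectural side effects, so the
    // waitForward/blockBackward serialization below can be dropped for it; which
    // addresses count as read-only is defined by CSRConst.roCsrrAddr.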
    isRoCsrr(i) := isCsrr(i) && LookupTreeDefault(
      inst(i).CSRIDX, false.B, CSRConst.roCsrrAddr.map(_.U -> true.B))

    uops(i).waitForward := io.in(i).bits.waitForward && !isRoCsrr(i)
    uops(i).blockBackward := io.in(i).bits.blockBackward && !isRoCsrr(i)

    // update cf according to ssit result
    uops(i).storeSetHit := io.ssit(i).valid
    uops(i).loadWaitStrict := io.ssit(i).strict && io.ssit(i).valid
    uops(i).ssid := io.ssit(i).ssid

    // update cf according to waittable result
    uops(i).loadWaitBit := io.waittable(i)

    uops(i).replayInst := false.B // set by IQ or MemQ
    // allocate a new phy reg
    needV0Dest(i) := io.in(i).valid && needDestReg(Reg_V0, io.in(i).bits)
    needVlDest(i) := io.in(i).valid && needDestReg(Reg_Vl, io.in(i).bits)
    needVecDest(i) := io.in(i).valid && needDestReg(Reg_V, io.in(i).bits)
    needFpDest(i) := io.in(i).valid && needDestReg(Reg_F, io.in(i).bits)
    needIntDest(i) := io.in(i).valid && needDestReg(Reg_I, io.in(i).bits)
    if (i < RabCommitWidth) {
      walkNeedIntDest(i) := io.rabCommits.walkValid(i) && needDestRegWalk(Reg_I, io.rabCommits.info(i))
      walkNeedFpDest(i) := io.rabCommits.walkValid(i) && needDestRegWalk(Reg_F, io.rabCommits.info(i))
      walkNeedVecDest(i) := io.rabCommits.walkValid(i) && needDestRegWalk(Reg_V, io.rabCommits.info(i))
      walkNeedV0Dest(i) := io.rabCommits.walkValid(i) && needDestRegWalk(Reg_V0, io.rabCommits.info(i))
      walkNeedVlDest(i) := io.rabCommits.walkValid(i) && needDestRegWalk(Reg_Vl, io.rabCommits.info(i))
      walkIsMove(i) := io.rabCommits.info(i).isMove
    }
    fpFreeList.io.allocateReq(i) := needFpDest(i)
    fpFreeList.io.walkReq(i) := walkNeedFpDest(i)
    vecFreeList.io.allocateReq(i) := needVecDest(i)
    vecFreeList.io.walkReq(i) := walkNeedVecDest(i)
    v0FreeList.io.allocateReq(i) := needV0Dest(i)
    v0FreeList.io.walkReq(i) := walkNeedV0Dest(i)
    vlFreeList.io.allocateReq(i) := needVlDest(i)
    vlFreeList.io.walkReq(i) := walkNeedVlDest(i)
    intFreeList.io.allocateReq(i) := needIntDest(i) && !isMove(i)
    intFreeList.io.walkReq(i) := walkNeedIntDest(i) && !walkIsMove(i)

    // no valid instruction from decode stage || all resources (dispatch1 + all free lists) ready
    io.in(i).ready := !io.in(0).valid || canOut

    uops(i).robIdx := robIdxHead + PopCount(io.in.zip(needRobFlags).take(i).map { case (in, needRobFlag) => in.valid && in.bits.lastUop && needRobFlag })
    uops(i).instrSize := instrSizesVec(i)
    val hasExceptionExceptFlushPipe = Cat(selectFrontend(uops(i).exceptionVec) :+ uops(i).exceptionVec(illegalInstr) :+ uops(i).exceptionVec(virtualInstr)).orR || TriggerAction.isDmode(uops(i).trigger)
    when(isMove(i) || hasExceptionExceptFlushPipe) {
      uops(i).numUops := 0.U
      uops(i).numWB := 0.U
    }
    if (i > 0) {
      when(!needRobFlags(i - 1)) {
        uops(i).firstUop := false.B
        uops(i).ftqPtr := uops(i - 1).ftqPtr
        uops(i).ftqOffset := uops(i - 1).ftqOffset
        uops(i).numUops := instrSizesVec(i) - PopCount(compressMasksVec(i) & Cat(isMove.reverse))
        uops(i).numWB := instrSizesVec(i) - PopCount(compressMasksVec(i) & Cat(isMove.reverse))
      }
    }
    when(!needRobFlags(i)) {
      uops(i).lastUop := false.B
      uops(i).numUops := instrSizesVec(i) - PopCount(compressMasksVec(i) & Cat(isMove.reverse))
      uops(i).numWB := instrSizesVec(i) - PopCount(compressMasksVec(i) & Cat(isMove.reverse))
    }
    uops(i).wfflags := (compressMasksVec(i) & Cat(io.in.map(_.bits.wfflags).reverse)).orR
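    // dirtyFs/dirtyVs conservatively mark whether any uop folded into this
    // compressed group writes FP / vector state (the per-slot wen bits are
    // OR-reduced under the compression mask), presumably so the FS/VS status
    // can be dirtied downstream.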
    uops(i).dirtyFs := (compressMasksVec(i) & Cat(io.in.map(_.bits.fpWen).reverse)).orR
    // vector instructions' uopSplitType cannot be UopSplitType.SCA_SIM
    uops(i).dirtyVs := (compressMasksVec(i) & Cat(io.in.map(_.bits.uopSplitType =/= UopSplitType.SCA_SIM).reverse)).orR
    // psrc0, psrc1, psrc2 don't require v0ReadPorts because their srcType can distinguish whether they are V0 or not
    uops(i).psrc(0) := Mux1H(uops(i).srcType(0)(2, 0), Seq(io.intReadPorts(i)(0), io.fpReadPorts(i)(0), io.vecReadPorts(i)(0)))
    uops(i).psrc(1) := Mux1H(uops(i).srcType(1)(2, 0), Seq(io.intReadPorts(i)(1), io.fpReadPorts(i)(1), io.vecReadPorts(i)(1)))
    uops(i).psrc(2) := Mux1H(uops(i).srcType(2)(2, 1), Seq(io.fpReadPorts(i)(2), io.vecReadPorts(i)(2)))
    uops(i).psrc(3) := io.v0ReadPorts(i)(0)
    uops(i).psrc(4) := io.vlReadPorts(i)(0)

    // int psrc2 should be bypassed from the next instruction if it is fused
    if (i < RenameWidth - 1) {
      when (io.fusionInfo(i).rs2FromRs2 || io.fusionInfo(i).rs2FromRs1) {
        uops(i).psrc(1) := Mux(io.fusionInfo(i).rs2FromRs2, io.intReadPorts(i + 1)(1), io.intReadPorts(i + 1)(0))
      }.elsewhen(io.fusionInfo(i).rs2FromZero) {
        uops(i).psrc(1) := 0.U
      }
    }
    uops(i).eliminatedMove := isMove(i)

    // update pdest
    uops(i).pdest := MuxCase(0.U, Seq(
      needIntDest(i) -> intFreeList.io.allocatePhyReg(i),
      needFpDest(i) -> fpFreeList.io.allocatePhyReg(i),
      needVecDest(i) -> vecFreeList.io.allocatePhyReg(i),
      needV0Dest(i) -> v0FreeList.io.allocatePhyReg(i),
      needVlDest(i) -> vlFreeList.io.allocatePhyReg(i),
    ))

    // Assign performance counters
    uops(i).debugInfo.renameTime := GTimer()

    io.out(i).valid := io.in(i).valid && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !io.rabCommits.isWalk
    io.out(i).bits := uops(i)
    // TODO: move this logic into the decode stage
    // dirty code for fence. The lsrc fields are passed via imm.
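    // FENCE encodes its ordering sets in the instruction immediate; decode appears
    // to stash them in lsrc(1)/lsrc(0), and rename repacks them into imm here so
    // the fence unit can read them from a single field.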
    when (io.out(i).bits.fuType === FuType.fence.U) {
      io.out(i).bits.imm := Cat(io.in(i).bits.lsrc(1), io.in(i).bits.lsrc(0))
    }

    // dirty code for SoftPrefetch (prefetch.r/prefetch.w)
//    when (io.in(i).bits.isSoftPrefetch) {
//      io.out(i).bits.fuType := FuType.ldu.U
//      io.out(i).bits.fuOpType := Mux(io.in(i).bits.lsrc(1) === 1.U, LSUOpType.prefetch_r, LSUOpType.prefetch_w)
//      io.out(i).bits.selImm := SelImm.IMM_S
//      io.out(i).bits.imm := Cat(io.in(i).bits.imm(io.in(i).bits.imm.getWidth - 1, 5), 0.U(5.W))
//    }

    // dirty code for lui+addi(w) fusion
    if (i < RenameWidth - 1) {
      val fused_lui32 = io.in(i).bits.selImm === SelImm.IMM_LUI32 && io.in(i).bits.fuType === FuType.alu.U
      when (fused_lui32) {
        val lui_imm = io.in(i).bits.imm(19, 0)
        val add_imm = io.in(i + 1).bits.imm(11, 0)
        require(io.out(i).bits.imm.getWidth >= lui_imm.getWidth + add_imm.getWidth)
        io.out(i).bits.imm := Cat(lui_imm, add_imm)
      }
    }

    // write speculative rename table
    // we update rat later inside commit code
    intSpecWen(i) := needIntDest(i) && intFreeList.io.canAllocate && intFreeList.io.doAllocate && !io.rabCommits.isWalk && !io.redirect.valid
    fpSpecWen(i) := needFpDest(i) && fpFreeList.io.canAllocate && fpFreeList.io.doAllocate && !io.rabCommits.isWalk && !io.redirect.valid
    vecSpecWen(i) := needVecDest(i) && vecFreeList.io.canAllocate && vecFreeList.io.doAllocate && !io.rabCommits.isWalk && !io.redirect.valid
    v0SpecWen(i) := needV0Dest(i) && v0FreeList.io.canAllocate && v0FreeList.io.doAllocate && !io.rabCommits.isWalk && !io.redirect.valid
    vlSpecWen(i) := needVlDest(i) && vlFreeList.io.canAllocate && vlFreeList.io.doAllocate && !io.rabCommits.isWalk && !io.redirect.valid

    if (i < RabCommitWidth) {
      walkIntSpecWen(i) := walkNeedIntDest(i) && !io.redirect.valid
      walkPdest(i) := io.rabCommits.info(i).pdest
    } else {
      walkPdest(i) := io.out(i).bits.pdest
    }
  }

  /**
   * trace begin
   */
  val inVec = io.in.map(_.bits)
  val canRobCompressVec = inVec.map(_.canRobCompress)
  val isRVCVec = inVec.map(_.preDecodeInfo.isRVC)
  val halfWordNumVec = (0 until RenameWidth).map { i =>
    compressMasksVec(i).asBools.zip(isRVCVec).map {
      case (mask, isRVC) => Mux(mask, Mux(isRVC, 1.U, 2.U), 0.U)
    }
  }

  for (i <- 0 until RenameWidth) {
    // iretire
    uops(i).traceBlockInPipe.iretire := Mux(canRobCompressVec(i),
      halfWordNumVec(i).reduce(_ +& _),
      Mux(isRVCVec(i), 1.U, 2.U)
    )

    // ilastsize
    val lastIsRVC = WireInit(false.B)
    (i until RenameWidth).foreach { j =>
      when(compressMasksVec(i)(j)) {
        lastIsRVC := io.in(j).bits.preDecodeInfo.isRVC
      }
    }

    uops(i).traceBlockInPipe.ilastsize := Mux(canRobCompressVec(i),
      Mux(lastIsRVC, Ilastsize.HalfWord, Ilastsize.Word),
      Mux(isRVCVec(i), Ilastsize.HalfWord, Ilastsize.Word)
    )

    // itype
    uops(i).traceBlockInPipe.itype := Itype.jumpTypeGen(inVec(i).preDecodeInfo.brType, inVec(i).ldest.asTypeOf(new OpRegType), inVec(i).lsrc(0).asTypeOf(new OpRegType))
  }
  /**
   * trace end
   */
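  // Worked example for the trace fields above (illustrative): a compressed group
  // {RVC add, RVI load, RVC branch} gives iretire = 1 + 2 + 1 = 4 halfwords, and
  // ilastsize = HalfWord because the last uop selected by the mask is RVC.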
  /**
   * How to set psrc:
   * - bypass the pdest to psrc if previous instructions write to the same ldest as lsrc
   * - default: psrc from RAT
   * How to set pdest:
   * - Mux(isMove, psrc, pdest_from_freelist).
   *
   * The critical path of rename lies here:
   * When move elimination is enabled, we need to update the rat with psrc.
   * However, psrc may come from a previous instruction's pdest, which in turn comes from the freelist.
   *
   * If we expand this logic for pdest(N):
   * pdest(N) = Mux(isMove(N), psrc(N), freelist_out(N))
   *          = Mux(isMove(N), Mux(bypass(N, N - 1), pdest(N - 1),
   *                           Mux(bypass(N, N - 2), pdest(N - 2),
   *                           ...
   *                           Mux(bypass(N, 0),     pdest(0),
   *                                                 rat_out(N))...)),
   *                           freelist_out(N))
   */
  // a simple functional model for now
  io.out(0).bits.pdest := Mux(isMove(0), uops(0).psrc.head, uops(0).pdest)

  // psrc(n) + pdest(1)
  val bypassCond: Vec[MixedVec[UInt]] = Wire(Vec(numRegSrc + 1, MixedVec(List.tabulate(RenameWidth - 1)(i => UInt((i + 1).W)))))
  require(io.in(0).bits.srcType.size == io.in(0).bits.numSrc)
  private val pdestLoc = io.in.head.bits.srcType.size // 2 vector src: v0, vl&vtype
  println(s"[Rename] idx of pdest in bypassCond $pdestLoc")
  for (i <- 1 until RenameWidth) {
    val v0Cond = io.in(i).bits.srcType.zipWithIndex.map { case (s, i) =>
      if (i == 3) (s === SrcType.vp) || (s === SrcType.v0)
      else false.B
    } :+ needV0Dest(i)
    val vlCond = io.in(i).bits.srcType.zipWithIndex.map { case (s, i) =>
      if (i == 4) s === SrcType.vp
      else false.B
    } :+ needVlDest(i)
    val vecCond = io.in(i).bits.srcType.map(_ === SrcType.vp) :+ needVecDest(i)
    val fpCond = io.in(i).bits.srcType.map(_ === SrcType.fp) :+ needFpDest(i)
    val intCond = io.in(i).bits.srcType.map(_ === SrcType.xp) :+ needIntDest(i)
    val target = io.in(i).bits.lsrc :+ io.in(i).bits.ldest
    for ((((((cond1, (condV0, condVl)), cond2), cond3), t), j) <- vecCond.zip(v0Cond.zip(vlCond)).zip(fpCond).zip(intCond).zip(target).zipWithIndex) {
      val destToSrc = io.in.take(i).zipWithIndex.map { case (in, j) =>
        val indexMatch = in.bits.ldest === t
        val writeMatch = cond3 && needIntDest(j) || cond2 && needFpDest(j) || cond1 && needVecDest(j)
        val v0vlMatch = condV0 && needV0Dest(j) || condVl && needVlDest(j)
        indexMatch && writeMatch || v0vlMatch
      }
      bypassCond(j)(i - 1) := VecInit(destToSrc).asUInt
    }
    io.out(i).bits.psrc(0) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(0)(i - 1).asBools).foldLeft(uops(i).psrc(0)) {
      (z, next) => Mux(next._2, next._1, z)
    }
    io.out(i).bits.psrc(1) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(1)(i - 1).asBools).foldLeft(uops(i).psrc(1)) {
      (z, next) => Mux(next._2, next._1, z)
    }
    io.out(i).bits.psrc(2) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(2)(i - 1).asBools).foldLeft(uops(i).psrc(2)) {
      (z, next) => Mux(next._2, next._1, z)
    }
    io.out(i).bits.psrc(3) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(3)(i - 1).asBools).foldLeft(uops(i).psrc(3)) {
      (z, next) => Mux(next._2, next._1, z)
    }
    io.out(i).bits.psrc(4) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(4)(i - 1).asBools).foldLeft(uops(i).psrc(4)) {
      (z, next) => Mux(next._2, next._1, z)
    }
    io.out(i).bits.pdest := Mux(isMove(i), io.out(i).bits.psrc(0), uops(i).pdest)

    // Todo: better implementation for field reuse
    // For fused-lui-load, load.src(0) is replaced by the imm.
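    // Illustrative fusion case (register names chosen arbitrarily):
    //   lui t0, 0x12345       // previous slot: IMM_U, ldest = t0
    //   ld  t1, 0x678(t0)     // this slot: lsrc(0) = t0
    // The load's base-register operand becomes an immediate: srcType(0) is
    // rewritten to SrcType.imm and imm becomes Cat(lui_imm(19,0), ld_imm(11,0)),
    // so the address is computed from the fused 32-bit immediate alone.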
    val last_is_lui = io.in(i - 1).bits.selImm === SelImm.IMM_U && io.in(i - 1).bits.srcType(0) =/= SrcType.pc
    val this_is_load = io.in(i).bits.fuType === FuType.ldu.U
    val lui_to_load = io.in(i - 1).valid && io.in(i - 1).bits.ldest === io.in(i).bits.lsrc(0)
    val fused_lui_load = last_is_lui && this_is_load && lui_to_load
    when (fused_lui_load) {
      // The first LOAD operand (base address) is replaced by LUI-imm and stored in imm
      val lui_imm = io.in(i - 1).bits.imm(ImmUnion.U.len - 1, 0)
      val ld_imm = io.in(i).bits.imm(ImmUnion.I.len - 1, 0)
      require(io.out(i).bits.imm.getWidth >= lui_imm.getWidth + ld_imm.getWidth)
      io.out(i).bits.srcType(0) := SrcType.imm
      io.out(i).bits.imm := Cat(lui_imm, ld_imm)
    }

  }

  val genSnapshot = Cat(io.out.map(out => out.fire && out.bits.snapshot)).orR
  val lastCycleCreateSnpt = RegInit(false.B)
  lastCycleCreateSnpt := genSnapshot && !io.snptIsFull
  val sameSnptDistance = (RobCommitWidth * 4).U
  // notInSameSnpt: 1. robIdxHead - snptLastEnq >= sameSnptDistance, or 2. no snapshot exists
  val notInSameSnpt = GatedValidRegNext(distanceBetween(robIdxHeadNext, io.snptLastEnq.bits) >= sameSnptDistance || !io.snptLastEnq.valid)
  val allowSnpt = if (EnableRenameSnapshot) notInSameSnpt && !lastCycleCreateSnpt && io.in.head.bits.firstUop else false.B
  io.out.zip(io.in).foreach { case (out, in) => out.bits.snapshot := allowSnpt && (!in.bits.preDecodeInfo.notCFI || FuType.isJump(in.bits.fuType)) && in.fire }
  io.out.foreach { x =>
    x.bits.hasException := Cat(selectFrontend(x.bits.exceptionVec) :+ x.bits.exceptionVec(illegalInstr) :+ x.bits.exceptionVec(virtualInstr)).orR || TriggerAction.isDmode(x.bits.trigger)
  }
  if (backendParams.debugEn) {
    dontTouch(robIdxHeadNext)
    dontTouch(notInSameSnpt)
    dontTouch(genSnapshot)
  }
  intFreeList.io.snpt := io.snpt
  fpFreeList.io.snpt := io.snpt
  vecFreeList.io.snpt := io.snpt
  v0FreeList.io.snpt := io.snpt
  vlFreeList.io.snpt := io.snpt
  intFreeList.io.snpt.snptEnq := genSnapshot
  fpFreeList.io.snpt.snptEnq := genSnapshot
  vecFreeList.io.snpt.snptEnq := genSnapshot
  v0FreeList.io.snpt.snptEnq := genSnapshot
  vlFreeList.io.snpt.snptEnq := genSnapshot

  /**
   * Instructions commit: update freelist and rename table
   */
  for (i <- 0 until RabCommitWidth) {
    val commitValid = io.rabCommits.isCommit && io.rabCommits.commitValid(i)
    val walkValid = io.rabCommits.isWalk && io.rabCommits.walkValid(i)
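    // Two pieces of per-slot bookkeeping are grouped in this loop: (I) driving the
    // speculative RAT write ports, and (II) returning the old physical register
    // displaced by a committing instruction to its free list.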
    // I. RAT Update
    // When redirect happens (mis-prediction), don't update the rename table
    io.intRenamePorts(i).wen := intSpecWen(i)
    io.intRenamePorts(i).addr := uops(i).ldest(log2Ceil(IntLogicRegs) - 1, 0)
    io.intRenamePorts(i).data := io.out(i).bits.pdest

    io.fpRenamePorts(i).wen := fpSpecWen(i)
    io.fpRenamePorts(i).addr := uops(i).ldest(log2Ceil(FpLogicRegs) - 1, 0)
    io.fpRenamePorts(i).data := fpFreeList.io.allocatePhyReg(i)

    io.vecRenamePorts(i).wen := vecSpecWen(i)
    io.vecRenamePorts(i).addr := uops(i).ldest(log2Ceil(VecLogicRegs) - 1, 0)
    io.vecRenamePorts(i).data := vecFreeList.io.allocatePhyReg(i)

    io.v0RenamePorts(i).wen := v0SpecWen(i)
    io.v0RenamePorts(i).addr := uops(i).ldest(log2Ceil(V0LogicRegs) - 1, 0)
    io.v0RenamePorts(i).data := v0FreeList.io.allocatePhyReg(i)

    io.vlRenamePorts(i).wen := vlSpecWen(i)
    io.vlRenamePorts(i).addr := uops(i).ldest(log2Ceil(VlLogicRegs) - 1, 0)
    io.vlRenamePorts(i).data := vlFreeList.io.allocatePhyReg(i)

    // II. Free List Update
    intFreeList.io.freeReq(i) := io.int_need_free(i)
    intFreeList.io.freePhyReg(i) := RegNext(io.int_old_pdest(i))
    fpFreeList.io.freeReq(i) := GatedValidRegNext(commitValid && needDestRegCommit(Reg_F, io.rabCommits.info(i)))
    fpFreeList.io.freePhyReg(i) := io.fp_old_pdest(i)
    vecFreeList.io.freeReq(i) := GatedValidRegNext(commitValid && needDestRegCommit(Reg_V, io.rabCommits.info(i)))
    vecFreeList.io.freePhyReg(i) := io.vec_old_pdest(i)
    v0FreeList.io.freeReq(i) := GatedValidRegNext(commitValid && needDestRegCommit(Reg_V0, io.rabCommits.info(i)))
    v0FreeList.io.freePhyReg(i) := io.v0_old_pdest(i)
    vlFreeList.io.freeReq(i) := GatedValidRegNext(commitValid && needDestRegCommit(Reg_Vl, io.rabCommits.info(i)))
    vlFreeList.io.freePhyReg(i) := io.vl_old_pdest(i)
  }

  /*
   * Debug and performance counters
   */
  def printRenameInfo(in: DecoupledIO[DecodedInst], out: DecoupledIO[DynInst]) = {
    XSInfo(out.fire, p"pc:${Hexadecimal(in.bits.pc)} in(${in.valid},${in.ready}) " +
      p"lsrc(0):${in.bits.lsrc(0)} -> psrc(0):${out.bits.psrc(0)} " +
      p"lsrc(1):${in.bits.lsrc(1)} -> psrc(1):${out.bits.psrc(1)} " +
      p"lsrc(2):${in.bits.lsrc(2)} -> psrc(2):${out.bits.psrc(2)} " +
      p"ldest:${in.bits.ldest} -> pdest:${out.bits.pdest}\n"
    )
  }

  for ((x, y) <- io.in.zip(io.out)) {
    printRenameInfo(x, y)
  }

  io.out.foreach { x =>
    when(x.valid && x.bits.rfWen) {
      assert(x.bits.ldest =/= 0.U, "rfWen cannot be 1 when Int regfile ldest is 0")
    }
  }
  val debugRedirect = RegEnable(io.redirect.bits, io.redirect.valid)
  // bad speculation
  val recStall = io.redirect.valid || io.rabCommits.isWalk
  val ctrlRecStall = Mux(io.redirect.valid, io.redirect.bits.debugIsCtrl, io.rabCommits.isWalk && debugRedirect.debugIsCtrl)
  val mvioRecStall = Mux(io.redirect.valid, io.redirect.bits.debugIsMemVio, io.rabCommits.isWalk && debugRedirect.debugIsMemVio)
  val otherRecStall = recStall && !(ctrlRecStall || mvioRecStall)
  XSPerfAccumulate("recovery_stall", recStall)
  XSPerfAccumulate("control_recovery_stall", ctrlRecStall)
  XSPerfAccumulate("mem_violation_recovery_stall", mvioRecStall)
  XSPerfAccumulate("other_recovery_stall", otherRecStall)
  // freelist stall
  val notRecStall = !io.out.head.valid && !recStall
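  // A stall is charged to one specific free list only when that list alone cannot
  // allocate while all the other lists can; if two or more lists are empty in the
  // same cycle, the event is counted as multiFlStall instead.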
  val intFlStall = notRecStall && inHeadValid && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !intFreeList.io.canAllocate
  val fpFlStall = notRecStall && inHeadValid && intFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !fpFreeList.io.canAllocate
  val vecFlStall = notRecStall && inHeadValid && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !vecFreeList.io.canAllocate
  val v0FlStall = notRecStall && inHeadValid && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && vlFreeList.io.canAllocate && !v0FreeList.io.canAllocate
  val vlFlStall = notRecStall && inHeadValid && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && !vlFreeList.io.canAllocate
  val multiFlStall = notRecStall && inHeadValid && (PopCount(Cat(
    !intFreeList.io.canAllocate,
    !fpFreeList.io.canAllocate,
    !vecFreeList.io.canAllocate,
    !v0FreeList.io.canAllocate,
    !vlFreeList.io.canAllocate,
  )) > 1.U)
  // other stall
  val otherStall = notRecStall && !intFlStall && !fpFlStall && !vecFlStall && !v0FlStall && !vlFlStall && !multiFlStall

  io.stallReason.in.backReason.valid := io.stallReason.out.backReason.valid || !io.in.head.ready
  io.stallReason.in.backReason.bits := Mux(io.stallReason.out.backReason.valid, io.stallReason.out.backReason.bits,
    MuxCase(TopDownCounters.OtherCoreStall.id.U, Seq(
      ctrlRecStall -> TopDownCounters.ControlRecoveryStall.id.U,
      mvioRecStall -> TopDownCounters.MemVioRecoveryStall.id.U,
      otherRecStall -> TopDownCounters.OtherRecoveryStall.id.U,
      intFlStall -> TopDownCounters.IntFlStall.id.U,
      fpFlStall -> TopDownCounters.FpFlStall.id.U,
      vecFlStall -> TopDownCounters.VecFlStall.id.U,
      v0FlStall -> TopDownCounters.V0FlStall.id.U,
      vlFlStall -> TopDownCounters.VlFlStall.id.U,
      multiFlStall -> TopDownCounters.MultiFlStall.id.U,
    )
  ))
  io.stallReason.out.reason.zip(io.stallReason.in.reason).zip(io.in.map(_.valid)).foreach { case ((out, in), valid) =>
    out := Mux(io.stallReason.in.backReason.valid, io.stallReason.in.backReason.bits, in)
  }

  XSDebug(io.rabCommits.isWalk, p"Walk Recovery Enabled\n")
  XSDebug(io.rabCommits.isWalk, p"validVec:${Binary(io.rabCommits.walkValid.asUInt)}\n")
  for (i <- 0 until RabCommitWidth) {
    val info = io.rabCommits.info(i)
    XSDebug(io.rabCommits.isWalk && io.rabCommits.walkValid(i), p"[#$i walk info] " +
      p"ldest:${info.ldest} rfWen:${info.rfWen} fpWen:${info.fpWen} vecWen:${info.vecWen} v0Wen:${info.v0Wen} vlWen:${info.vlWen}")
  }

  XSDebug(p"inValidVec: ${Binary(Cat(io.in.map(_.valid)))}\n")

  XSPerfAccumulate("in_valid_count", PopCount(io.in.map(_.valid)))
  XSPerfAccumulate("in_fire_count", PopCount(io.in.map(_.fire)))
  XSPerfAccumulate("in_valid_not_ready_count", PopCount(io.in.map(x => x.valid && !x.ready)))
  XSPerfAccumulate("wait_cycle", !io.in.head.valid && dispatchCanAcc)
  // These stall reasons can overlap each other, but we prioritize them as follows:
  // walk stall > dispatch stall > int freelist stall > fp freelist stall > vec/v0/vl freelist stall
  private val inHeadStall = io.in.head match { case x => x.valid && !x.ready }
  private val stallForWalk = inHeadValid && io.rabCommits.isWalk
  private val stallForDispatch = inHeadValid && !io.rabCommits.isWalk && !dispatchCanAcc
  private val stallForIntFL = inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !intFreeList.io.canAllocate
  private val stallForFpFL = inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !fpFreeList.io.canAllocate
  private val stallForVecFL = inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !vecFreeList.io.canAllocate
  private val stallForV0FL = inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && vlFreeList.io.canAllocate && !v0FreeList.io.canAllocate
  private val stallForVlFL = inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && !vlFreeList.io.canAllocate
  XSPerfAccumulate("stall_cycle", inHeadStall)
  XSPerfAccumulate("stall_cycle_walk", stallForWalk)
  XSPerfAccumulate("stall_cycle_dispatch", stallForDispatch)
  XSPerfAccumulate("stall_cycle_int", stallForIntFL)
  XSPerfAccumulate("stall_cycle_fp", stallForFpFL)
  XSPerfAccumulate("stall_cycle_vec", stallForVecFL)
  XSPerfAccumulate("stall_cycle_v0", stallForV0FL)
  XSPerfAccumulate("stall_cycle_vl", stallForVlFL)

  XSPerfHistogram("in_valid_range", PopCount(io.in.map(_.valid)), true.B, 0, DecodeWidth + 1, 1)
  XSPerfHistogram("in_fire_range", PopCount(io.in.map(_.fire)), true.B, 0, DecodeWidth + 1, 1)
  XSPerfHistogram("out_valid_range", PopCount(io.out.map(_.valid)), true.B, 0, DecodeWidth + 1, 1)
  XSPerfHistogram("out_fire_range", PopCount(io.out.map(_.fire)), true.B, 0, DecodeWidth + 1, 1)

  XSPerfAccumulate("move_instr_count", PopCount(io.out.map(out => out.fire && out.bits.isMove)))
  val is_fused_lui_load = io.out.map(o => o.fire && o.bits.fuType === FuType.ldu.U && o.bits.srcType(0) === SrcType.imm)
  XSPerfAccumulate("fused_lui_load_instr_count", PopCount(is_fused_lui_load))

  val renamePerf = Seq(
    ("rename_in                  ", PopCount(io.in.map(_.valid & io.in(0).ready))),
    ("rename_waitinstr           ", PopCount((0 until RenameWidth).map(i => io.in(i).valid && !io.in(i).ready))),
    ("rename_stall               ", inHeadStall),
    ("rename_stall_cycle_walk    ", inHeadValid && io.rabCommits.isWalk),
    ("rename_stall_cycle_dispatch", inHeadValid && !io.rabCommits.isWalk && !dispatchCanAcc),
    ("rename_stall_cycle_int     ", inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !intFreeList.io.canAllocate),
    ("rename_stall_cycle_fp      ", inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !fpFreeList.io.canAllocate),
    ("rename_stall_cycle_vec     ", inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !vecFreeList.io.canAllocate),
    ("rename_stall_cycle_v0      ", inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && vlFreeList.io.canAllocate && !v0FreeList.io.canAllocate),
    ("rename_stall_cycle_vl      ", inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && !vlFreeList.io.canAllocate),
  )
  val intFlPerf = intFreeList.getPerfEvents
  val fpFlPerf = fpFreeList.getPerfEvents
  val vecFlPerf = vecFreeList.getPerfEvents
  val v0FlPerf = v0FreeList.getPerfEvents
  val vlFlPerf = vlFreeList.getPerfEvents
  val perfEvents = renamePerf ++ intFlPerf ++ fpFlPerf ++ vecFlPerf ++ v0FlPerf ++ vlFlPerf
  generatePerfEvent()
}