1/*************************************************************************************** 2* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences 3* Copyright (c) 2020-2021 Peng Cheng Laboratory 4* 5* XiangShan is licensed under Mulan PSL v2. 6* You can use this software according to the terms and conditions of the Mulan PSL v2. 7* You may obtain a copy of Mulan PSL v2 at: 8* http://license.coscl.org.cn/MulanPSL2 9* 10* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, 11* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, 12* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. 13* 14* See the Mulan PSL v2 for more details. 15***************************************************************************************/ 16 17package xiangshan.backend.rename 18 19import org.chipsalliance.cde.config.Parameters 20import chisel3._ 21import chisel3.util._ 22import utility._ 23import utils._ 24import xiangshan._ 25import xiangshan.backend.Bundles.{DecodedInst, DynInst} 26import xiangshan.backend.decode.{FusionDecodeInfo, ImmUnion, Imm_I, Imm_LUI_LOAD, Imm_U} 27import xiangshan.backend.fu.FuType 28import xiangshan.backend.rename.freelist._ 29import xiangshan.backend.rob.{RobEnqIO, RobPtr} 30import xiangshan.mem.mdp._ 31 32class Rename(implicit p: Parameters) extends XSModule with HasCircularQueuePtrHelper with HasPerfEvents { 33 34 // params alias 35 private val numRegSrc = backendParams.numRegSrc 36 private val numVecRegSrc = backendParams.numVecRegSrc 37 private val numVecRatPorts = numVecRegSrc + 1 // +1 dst 38 39 println(s"[Rename] numRegSrc: $numRegSrc") 40 41 val io = IO(new Bundle() { 42 val redirect = Flipped(ValidIO(new Redirect)) 43 val robCommits = Input(new RobCommitIO) 44 // from decode 45 val in = Vec(RenameWidth, Flipped(DecoupledIO(new DecodedInst))) 46 val fusionInfo = Vec(DecodeWidth - 1, Flipped(new FusionDecodeInfo)) 47 // ssit read result 48 val ssit = Flipped(Vec(RenameWidth, Output(new SSITEntry))) 49 // waittable read result 50 val waittable = Flipped(Vec(RenameWidth, Output(Bool()))) 51 // to rename table 52 val intReadPorts = Vec(RenameWidth, Vec(3, Input(UInt(PhyRegIdxWidth.W)))) 53 val fpReadPorts = Vec(RenameWidth, Vec(4, Input(UInt(PhyRegIdxWidth.W)))) 54 val vecReadPorts = Vec(RenameWidth, Vec(numVecRatPorts, Input(UInt(PhyRegIdxWidth.W)))) 55 val intRenamePorts = Vec(RenameWidth, Output(new RatWritePort)) 56 val fpRenamePorts = Vec(RenameWidth, Output(new RatWritePort)) 57 val vecRenamePorts = Vec(RenameWidth, Output(new RatWritePort)) 58 // from rename table 59 val int_old_pdest = Vec(CommitWidth, Input(UInt(PhyRegIdxWidth.W))) 60 val fp_old_pdest = Vec(CommitWidth, Input(UInt(PhyRegIdxWidth.W))) 61 val vec_old_pdest = Vec(CommitWidth, Input(UInt(PhyRegIdxWidth.W))) 62 val int_need_free = Vec(CommitWidth, Input(Bool())) 63 // to dispatch1 64 val out = Vec(RenameWidth, DecoupledIO(new DynInst)) 65 // for snapshots 66 val snpt = Input(new SnapshotPort) 67 val snptLastEnq = Flipped(ValidIO(new RobPtr)) 68 val robIsEmpty = Input(Bool()) 69 val toDispatchIsFp = Output(Vec(RenameWidth,Bool())) 70 val toDispatchIsInt = Output(Vec(RenameWidth,Bool())) 71 // debug arch ports 72 val debug_int_rat = if (backendParams.debugEn) Some(Vec(32, Input(UInt(PhyRegIdxWidth.W)))) else None 73 val debug_vconfig_rat = if (backendParams.debugEn) Some(Input(UInt(PhyRegIdxWidth.W))) else None 74 val debug_fp_rat = if (backendParams.debugEn) Some(Vec(32, Input(UInt(PhyRegIdxWidth.W)))) else None 75 val debug_vec_rat = if (backendParams.debugEn) Some(Vec(32, Input(UInt(PhyRegIdxWidth.W)))) else None 76 // perf only 77 val stallReason = new Bundle { 78 val in = Flipped(new StallReasonIO(RenameWidth)) 79 val out = new StallReasonIO(RenameWidth) 80 } 81 }) 82 83 val compressUnit = Module(new CompressUnit()) 84 // create free list and rat 85 val intFreeList = Module(new MEFreeList(IntPhyRegs)) 86 val fpFreeList = Module(new StdFreeList(VfPhyRegs - FpLogicRegs - VecLogicRegs)) 87 88 intFreeList.io.commit <> io.robCommits 89 intFreeList.io.debug_rat.foreach(_ <> io.debug_int_rat.get) 90 fpFreeList.io.commit <> io.robCommits 91 fpFreeList.io.debug_rat.foreach(_ <> io.debug_fp_rat.get) 92 93 // decide if given instruction needs allocating a new physical register (CfCtrl: from decode; RobCommitInfo: from rob) 94 // fp and vec share `fpFreeList` 95 def needDestReg[T <: DecodedInst](reg_t: RegType, x: T): Bool = reg_t match { 96 case Reg_I => x.rfWen && x.ldest =/= 0.U 97 case Reg_F => x.fpWen 98 case Reg_V => x.vecWen 99 } 100 def needDestRegCommit[T <: RobCommitInfo](reg_t: RegType, x: T): Bool = { 101 reg_t match { 102 case Reg_I => x.rfWen 103 case Reg_F => x.fpWen 104 case Reg_V => x.vecWen 105 } 106 } 107 def needDestRegWalk[T <: RobCommitInfo](reg_t: RegType, x: T): Bool = { 108 reg_t match { 109 case Reg_I => x.rfWen && x.ldest =/= 0.U 110 case Reg_F => x.fpWen 111 case Reg_V => x.vecWen 112 } 113 } 114 115 // connect [redirect + walk] ports for __float point__ & __integer__ free list 116 Seq(fpFreeList, intFreeList).foreach { case fl => 117 fl.io.redirect := io.redirect.valid 118 fl.io.walk := io.robCommits.isWalk 119 } 120 // only when both fp and int free list and dispatch1 has enough space can we do allocation 121 // when isWalk, freelist can definitely allocate 122 intFreeList.io.doAllocate := fpFreeList.io.canAllocate && io.out.map(_.ready).reduce(_ || _) || io.robCommits.isWalk 123 fpFreeList.io.doAllocate := intFreeList.io.canAllocate && io.out.map(_.ready).reduce(_ || _) || io.robCommits.isWalk 124 125 // dispatch1 ready ++ float point free list ready ++ int free list ready ++ not walk 126 val canOut = io.out(0).ready && fpFreeList.io.canAllocate && intFreeList.io.canAllocate && !io.robCommits.isWalk 127 128 compressUnit.io.in.zip(io.in).foreach{ case(sink, source) => 129 sink.valid := source.valid 130 sink.bits := source.bits 131 } 132 val needRobFlags = compressUnit.io.out.needRobFlags 133 val instrSizesVec = compressUnit.io.out.instrSizes 134 val compressMasksVec = compressUnit.io.out.masks 135 136 // speculatively assign the instruction with an robIdx 137 val validCount = PopCount(io.in.zip(needRobFlags).map{ case(in, needRobFlag) => in.valid && in.bits.lastUop && needRobFlag}) // number of instructions waiting to enter rob (from decode) 138 val robIdxHead = RegInit(0.U.asTypeOf(new RobPtr)) 139 val lastCycleMisprediction = RegNext(io.redirect.valid && !io.redirect.bits.flushItself()) 140 val robIdxHeadNext = Mux(io.redirect.valid, io.redirect.bits.robIdx, // redirect: move ptr to given rob index 141 Mux(lastCycleMisprediction, robIdxHead + 1.U, // mis-predict: not flush robIdx itself 142 Mux(canOut && io.in(0).fire, robIdxHead + validCount, // instructions successfully entered next stage: increase robIdx 143 /* default */ robIdxHead))) // no instructions passed by this cycle: stick to old value 144 robIdxHead := robIdxHeadNext 145 146 /** 147 * Rename: allocate free physical register and update rename table 148 */ 149 val uops = Wire(Vec(RenameWidth, new DynInst)) 150 uops.foreach( uop => { 151 uop.srcState := DontCare 152 uop.debugInfo := DontCare 153 uop.lqIdx := DontCare 154 uop.sqIdx := DontCare 155 uop.waitForRobIdx := DontCare 156 uop.singleStep := DontCare 157 uop.snapshot := DontCare 158 uop.dataSource := DontCare 159 uop.l1ExuOH := DontCare 160 }) 161 162 require(RenameWidth >= CommitWidth) 163 val needVecDest = Wire(Vec(RenameWidth, Bool())) 164 val needFpDest = Wire(Vec(RenameWidth, Bool())) 165 val needIntDest = Wire(Vec(RenameWidth, Bool())) 166 val hasValid = Cat(io.in.map(_.valid)).orR 167 private val inHeadValid = io.in.head.valid 168 169 val isMove = Wire(Vec(RenameWidth, Bool())) 170 isMove zip io.in.map(_.bits) foreach { 171 case (move, in) => move := Mux(in.exceptionVec.asUInt.orR, false.B, in.isMove) 172 } 173 174 val walkNeedIntDest = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B))) 175 val walkNeedFpDest = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B))) 176 val walkNeedVecDest = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B))) 177 val walkIsMove = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B))) 178 179 val intSpecWen = Wire(Vec(RenameWidth, Bool())) 180 val fpSpecWen = Wire(Vec(RenameWidth, Bool())) 181 val vecSpecWen = Wire(Vec(RenameWidth, Bool())) 182 183 val walkIntSpecWen = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B))) 184 185 val walkPdest = Wire(Vec(RenameWidth, UInt(PhyRegIdxWidth.W))) 186 187 val hasInstr = RegInit(false.B) 188 hasInstr := io.in.head.valid && !io.in.head.ready 189 val outFireNum = RegInit(0.U(RenameWidth.U.getWidth.W)) 190 val outFireNumNext = Mux(io.in.head.fire || io.redirect.valid, 0.U, outFireNum + PopCount(io.out.map(_.fire))) 191 outFireNum := outFireNumNext 192 val inValidNum = PopCount(io.in.map(_.valid)) 193 val allOut = inValidNum === outFireNum + PopCount(io.out.map(_.fire)) 194 val outValidMask = Wire(Vec(RenameWidth, Bool())) 195 outValidMask.zipWithIndex.map{ case(m,i) => 196 m := Mux(hasInstr, Mux(outFireNum > PopCount(io.in.map(_.valid).take(i)), false.B, true.B), true.B) 197 } 198 val validWaitForward = io.in.map(_.bits.waitForward).zip(outValidMask).map(x => x._1 && x._2) 199 val isWaitForward = VecInit((0 until RenameWidth).map(i => validWaitForward.take(i).fold(false.B)(_ || _))) 200 val pdestReg = Reg(Vec(RenameWidth, chiselTypeOf(uops.head.pdest))) 201 // uop calculation 202 for (i <- 0 until RenameWidth) { 203 for ((name, data) <- uops(i).elements) { 204 if (io.in(i).bits.elements.contains(name)) { 205 data := io.in(i).bits.elements(name) 206 } 207 } 208 209 // update cf according to ssit result 210 uops(i).storeSetHit := io.ssit(i).valid 211 uops(i).loadWaitStrict := io.ssit(i).strict && io.ssit(i).valid 212 uops(i).ssid := io.ssit(i).ssid 213 214 // update cf according to waittable result 215 uops(i).loadWaitBit := io.waittable(i) 216 217 uops(i).replayInst := false.B // set by IQ or MemQ 218 // alloc a new phy reg, fp and vec share the `fpFreeList` 219 needVecDest (i) := io.in(i).valid && io.out(i).fire && needDestReg(Reg_V,io.in(i).bits) && outValidMask(i) && fpFreeList.io.canAllocate && fpFreeList.io.doAllocate && !io.robCommits.isWalk && !io.redirect.valid 220 needFpDest (i) := io.in(i).valid && io.out(i).fire && needDestReg(Reg_F,io.in(i).bits) && outValidMask(i) && fpFreeList.io.canAllocate && fpFreeList.io.doAllocate && !io.robCommits.isWalk && !io.redirect.valid 221 needIntDest (i) := io.in(i).valid && io.out(i).fire && needDestReg(Reg_I,io.in(i).bits) && outValidMask(i) && intFreeList.io.canAllocate && intFreeList.io.doAllocate && !io.robCommits.isWalk && !io.redirect.valid 222 if (i < CommitWidth) { 223 walkNeedIntDest(i) := io.robCommits.walkValid(i) && needDestRegWalk(Reg_I, io.robCommits.info(i)) 224 walkNeedFpDest(i) := io.robCommits.walkValid(i) && needDestRegWalk(Reg_F, io.robCommits.info(i)) 225 walkNeedVecDest(i) := io.robCommits.walkValid(i) && needDestRegWalk(Reg_V, io.robCommits.info(i)) 226 walkIsMove(i) := io.robCommits.info(i).isMove 227 } 228 fpFreeList.io.allocateReq(i) := needFpDest(i) || needVecDest(i) 229 fpFreeList.io.walkReq(i) := walkNeedFpDest(i) || walkNeedVecDest(i) 230 intFreeList.io.allocateReq(i) := needIntDest(i) && !isMove(i) 231 intFreeList.io.walkReq(i) := walkNeedIntDest(i) && !walkIsMove(i) 232 233 // no valid instruction from decode stage || all resources (dispatch1 + both free lists) ready 234 io.in(i).ready := !hasValid || (canOut && allOut) 235 236 uops(i).robIdx := robIdxHead + PopCount(io.in.zip(needRobFlags).take(i).map{ case(in, needRobFlag) => in.valid && in.bits.lastUop && needRobFlag}) 237 uops(i).instrSize := instrSizesVec(i) 238 when(isMove(i)) { 239 uops(i).numUops := 0.U 240 uops(i).numWB := 0.U 241 } 242 if (i > 0) { 243 when(!needRobFlags(i - 1)) { 244 uops(i).firstUop := false.B 245 uops(i).ftqPtr := uops(i - 1).ftqPtr 246 uops(i).ftqOffset := uops(i - 1).ftqOffset 247 uops(i).numUops := instrSizesVec(i) - PopCount(compressMasksVec(i) & Cat(isMove.reverse)) 248 uops(i).numWB := instrSizesVec(i) - PopCount(compressMasksVec(i) & Cat(isMove.reverse)) 249 } 250 } 251 when(!needRobFlags(i)) { 252 uops(i).lastUop := false.B 253 uops(i).numUops := instrSizesVec(i) - PopCount(compressMasksVec(i) & Cat(isMove.reverse)) 254 uops(i).numWB := instrSizesVec(i) - PopCount(compressMasksVec(i) & Cat(isMove.reverse)) 255 } 256 uops(i).wfflags := (compressMasksVec(i) & Cat(io.in.map(_.bits.wfflags).reverse)).orR 257 uops(i).dirtyFs := (compressMasksVec(i) & Cat(io.in.map(_.bits.fpWen).reverse)).orR 258 259 uops(i).psrc(0) := Mux1H(uops(i).srcType(0), Seq(io.intReadPorts(i)(0), io.fpReadPorts(i)(0), io.vecReadPorts(i)(0))) 260 uops(i).psrc(1) := Mux1H(uops(i).srcType(1), Seq(io.intReadPorts(i)(1), io.fpReadPorts(i)(1), io.vecReadPorts(i)(1))) 261 uops(i).psrc(2) := Mux1H(uops(i).srcType(2)(2, 1), Seq(io.fpReadPorts(i)(2), io.vecReadPorts(i)(2))) 262 uops(i).psrc(3) := io.vecReadPorts(i)(3) 263 uops(i).psrc(4) := io.vecReadPorts(i)(4) // Todo: vl read port 264 265 // int psrc2 should be bypassed from next instruction if it is fused 266 if (i < RenameWidth - 1) { 267 when (io.fusionInfo(i).rs2FromRs2 || io.fusionInfo(i).rs2FromRs1) { 268 uops(i).psrc(1) := Mux(io.fusionInfo(i).rs2FromRs2, io.intReadPorts(i + 1)(1), io.intReadPorts(i + 1)(0)) 269 }.elsewhen(io.fusionInfo(i).rs2FromZero) { 270 uops(i).psrc(1) := 0.U 271 } 272 } 273 uops(i).eliminatedMove := isMove(i) 274 275 // update pdest 276 val pdestWire = MuxCase(0.U, Seq( 277 (needIntDest(i) && !isMove(i)) -> intFreeList.io.allocatePhyReg(i), 278 (needFpDest(i) || needVecDest(i)) -> fpFreeList.io.allocatePhyReg(i), 279 )) 280 pdestReg(i) := Mux(io.out(i).fire, pdestWire, pdestReg(i)) 281 uops(i).pdest := Mux(io.out(i).fire, pdestWire, pdestReg(i)) 282 283 // Assign performance counters 284 uops(i).debugInfo.renameTime := GTimer() 285 286 dontTouch(isWaitForward) 287 io.out(i).valid := !isWaitForward(i) && (!io.in(i).bits.waitForward || (io.in(i).bits.waitForward && io.robIsEmpty) ) && outValidMask(i) && io.in(i).valid && io.out(i).ready && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && !io.robCommits.isWalk 288 io.out(i).bits := uops(i) 289 // Todo: move these shit in decode stage 290 // dirty code for fence. The lsrc is passed by imm. 291 when (io.out(i).bits.fuType === FuType.fence.U) { 292 io.out(i).bits.imm := Cat(io.in(i).bits.lsrc(1), io.in(i).bits.lsrc(0)) 293 } 294 295 // dirty code for SoftPrefetch (prefetch.r/prefetch.w) 296// when (io.in(i).bits.isSoftPrefetch) { 297// io.out(i).bits.fuType := FuType.ldu.U 298// io.out(i).bits.fuOpType := Mux(io.in(i).bits.lsrc(1) === 1.U, LSUOpType.prefetch_r, LSUOpType.prefetch_w) 299// io.out(i).bits.selImm := SelImm.IMM_S 300// io.out(i).bits.imm := Cat(io.in(i).bits.imm(io.in(i).bits.imm.getWidth - 1, 5), 0.U(5.W)) 301// } 302 303 // dirty code for lui+addi(w) fusion 304 if (i < RenameWidth - 1) { 305 val fused_lui32 = io.in(i).bits.selImm === SelImm.IMM_LUI32 && io.in(i).bits.fuType === FuType.alu.U 306 when (fused_lui32) { 307 val lui_imm = io.in(i).bits.imm(19, 0) 308 val add_imm = io.in(i + 1).bits.imm(11, 0) 309 io.out(i).bits.imm := Imm_LUI_LOAD().immFromLuiLoad(lui_imm, add_imm) 310 val lsrcWidth = uops(i).lsrc.head.getWidth 311 val lui_imm_in_imm = ImmUnion.maxLen - Imm_I().len 312 val left_lui_imm = Imm_U().len - lui_imm_in_imm 313 require(2 * lsrcWidth >= left_lui_imm, "cannot fused lui and addi(w) with lsrc") 314 io.out(i).bits.lsrc(0) := lui_imm(lui_imm_in_imm + lsrcWidth - 1, lui_imm_in_imm) 315 io.out(i).bits.lsrc(1) := lui_imm(lui_imm.getWidth - 1, lui_imm_in_imm + lsrcWidth) 316 } 317 } 318 319 // write speculative rename table 320 // we update rat later inside commit code 321 intSpecWen(i) := needIntDest(i) && intFreeList.io.canAllocate && intFreeList.io.doAllocate && !io.robCommits.isWalk && !io.redirect.valid 322 fpSpecWen(i) := needFpDest(i) && fpFreeList.io.canAllocate && fpFreeList.io.doAllocate && !io.robCommits.isWalk && !io.redirect.valid 323 vecSpecWen(i) := needVecDest(i) && fpFreeList.io.canAllocate && fpFreeList.io.doAllocate && !io.robCommits.isWalk && !io.redirect.valid 324 io.toDispatchIsFp := fpSpecWen.zip(vecSpecWen).map{ case(fp, vec) => fp || vec } 325 io.toDispatchIsInt := intSpecWen 326 327 if (i < CommitWidth) { 328 walkIntSpecWen(i) := walkNeedIntDest(i) && !io.redirect.valid 329 walkPdest(i) := io.robCommits.info(i).pdest 330 } else { 331 walkPdest(i) := io.out(i).bits.pdest 332 } 333 } 334 335 /** 336 * How to set psrc: 337 * - bypass the pdest to psrc if previous instructions write to the same ldest as lsrc 338 * - default: psrc from RAT 339 * How to set pdest: 340 * - Mux(isMove, psrc, pdest_from_freelist). 341 * 342 * The critical path of rename lies here: 343 * When move elimination is enabled, we need to update the rat with psrc. 344 * However, psrc maybe comes from previous instructions' pdest, which comes from freelist. 345 * 346 * If we expand these logic for pdest(N): 347 * pdest(N) = Mux(isMove(N), psrc(N), freelist_out(N)) 348 * = Mux(isMove(N), Mux(bypass(N, N - 1), pdest(N - 1), 349 * Mux(bypass(N, N - 2), pdest(N - 2), 350 * ... 351 * Mux(bypass(N, 0), pdest(0), 352 * rat_out(N))...)), 353 * freelist_out(N)) 354 */ 355 // a simple functional model for now 356 io.out(0).bits.pdest := Mux(isMove(0), uops(0).psrc.head, uops(0).pdest) 357 358 // psrc(n) + pdest(1) 359 val bypassCond: Vec[MixedVec[UInt]] = Wire(Vec(numRegSrc + 1, MixedVec(List.tabulate(RenameWidth-1)(i => UInt((i+1).W))))) 360 require(io.in(0).bits.srcType.size == io.in(0).bits.numSrc) 361 private val pdestLoc = io.in.head.bits.srcType.size // 2 vector src: v0, vl&vtype 362 println(s"[Rename] idx of pdest in bypassCond $pdestLoc") 363 for (i <- 1 until RenameWidth) { 364 val vecCond = io.in(i).bits.srcType.map(_ === SrcType.vp) :+ needVecDest(i) 365 val fpCond = io.in(i).bits.srcType.map(_ === SrcType.fp) :+ needFpDest(i) 366 val intCond = io.in(i).bits.srcType.map(_ === SrcType.xp) :+ needIntDest(i) 367 val target = io.in(i).bits.lsrc :+ io.in(i).bits.ldest 368 for (((((cond1, cond2), cond3), t), j) <- vecCond.zip(fpCond).zip(intCond).zip(target).zipWithIndex) { 369 val destToSrc = io.in.take(i).zipWithIndex.map { case (in, j) => 370 val indexMatch = in.bits.ldest === t 371 val writeMatch = cond3 && needIntDest(j) || cond2 && needFpDest(j) || cond1 && needVecDest(j) 372 indexMatch && writeMatch 373 } 374 bypassCond(j)(i - 1) := VecInit(destToSrc).asUInt 375 } 376 io.out(i).bits.psrc(0) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(0)(i-1).asBools).foldLeft(uops(i).psrc(0)) { 377 (z, next) => Mux(next._2, next._1, z) 378 } 379 io.out(i).bits.psrc(1) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(1)(i-1).asBools).foldLeft(uops(i).psrc(1)) { 380 (z, next) => Mux(next._2, next._1, z) 381 } 382 io.out(i).bits.psrc(2) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(2)(i-1).asBools).foldLeft(uops(i).psrc(2)) { 383 (z, next) => Mux(next._2, next._1, z) 384 } 385 io.out(i).bits.psrc(3) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(3)(i-1).asBools).foldLeft(uops(i).psrc(3)) { 386 (z, next) => Mux(next._2, next._1, z) 387 } 388 io.out(i).bits.psrc(4) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(4)(i-1).asBools).foldLeft(uops(i).psrc(4)) { 389 (z, next) => Mux(next._2, next._1, z) 390 } 391 io.out(i).bits.pdest := Mux(isMove(i), io.out(i).bits.psrc(0), uops(i).pdest) 392 393 // Todo: better implementation for fields reuse 394 // For fused-lui-load, load.src(0) is replaced by the imm. 395 val last_is_lui = io.in(i - 1).bits.selImm === SelImm.IMM_U && io.in(i - 1).bits.srcType(0) =/= SrcType.pc 396 val this_is_load = io.in(i).bits.fuType === FuType.ldu.U 397 val lui_to_load = io.in(i - 1).valid && io.in(i - 1).bits.ldest === io.in(i).bits.lsrc(0) 398 val fused_lui_load = last_is_lui && this_is_load && lui_to_load 399 when (fused_lui_load) { 400 // The first LOAD operand (base address) is replaced by LUI-imm and stored in {psrc, imm} 401 val lui_imm = io.in(i - 1).bits.imm(19, 0) 402 val ld_imm = io.in(i).bits.imm 403 io.out(i).bits.srcType(0) := SrcType.imm 404 io.out(i).bits.imm := Imm_LUI_LOAD().immFromLuiLoad(lui_imm, ld_imm) 405 val psrcWidth = uops(i).psrc.head.getWidth 406 val lui_imm_in_imm = 20/*Todo: uops(i).imm.getWidth*/ - Imm_I().len 407 val left_lui_imm = Imm_U().len - lui_imm_in_imm 408 require(2 * psrcWidth >= left_lui_imm, "cannot fused lui and load with psrc") 409 io.out(i).bits.psrc(0) := lui_imm(lui_imm_in_imm + psrcWidth - 1, lui_imm_in_imm) 410 io.out(i).bits.psrc(1) := lui_imm(lui_imm.getWidth - 1, lui_imm_in_imm + psrcWidth) 411 } 412 413 } 414 415 val genSnapshot = Cat(io.out.map(out => out.fire && out.bits.snapshot)).orR 416 val snapshotCtr = RegInit((4 * CommitWidth).U) 417 val notInSameSnpt = RegNext(distanceBetween(robIdxHeadNext, io.snptLastEnq.bits) >= CommitWidth.U || !io.snptLastEnq.valid) 418 val allowSnpt = if (EnableRenameSnapshot) !hasInstr && !snapshotCtr.orR && notInSameSnpt && io.in.head.bits.firstUop else false.B 419 io.out.zip(io.in).foreach{ case (out, in) => out.bits.snapshot := allowSnpt && (!in.bits.preDecodeInfo.notCFI || FuType.isJump(in.bits.fuType)) && in.fire } 420 when(genSnapshot) { 421 snapshotCtr := (4 * CommitWidth).U - PopCount(io.out.map(_.fire)) 422 }.elsewhen(io.out.head.fire) { 423 snapshotCtr := Mux(snapshotCtr < PopCount(io.out.map(_.fire)), 0.U, snapshotCtr - PopCount(io.out.map(_.fire))) 424 } 425 426 intFreeList.io.snpt := io.snpt 427 fpFreeList.io.snpt := io.snpt 428 intFreeList.io.snpt.snptEnq := genSnapshot 429 fpFreeList.io.snpt.snptEnq := genSnapshot 430 431 /** 432 * Instructions commit: update freelist and rename table 433 */ 434 for (i <- 0 until CommitWidth) { 435 val commitValid = io.robCommits.isCommit && io.robCommits.commitValid(i) 436 val walkValid = io.robCommits.isWalk && io.robCommits.walkValid(i) 437 438 // I. RAT Update 439 // When redirect happens (mis-prediction), don't update the rename table 440 io.intRenamePorts(i).wen := intSpecWen(i) 441 io.intRenamePorts(i).addr := uops(i).ldest 442 io.intRenamePorts(i).data := io.out(i).bits.pdest 443 444 io.fpRenamePorts(i).wen := fpSpecWen(i) 445 io.fpRenamePorts(i).addr := uops(i).ldest 446 io.fpRenamePorts(i).data := fpFreeList.io.allocatePhyReg(i) 447 448 io.vecRenamePorts(i).wen := vecSpecWen(i) 449 io.vecRenamePorts(i).addr := uops(i).ldest 450 io.vecRenamePorts(i).data := fpFreeList.io.allocatePhyReg(i) 451 452 // II. Free List Update 453 intFreeList.io.freeReq(i) := io.int_need_free(i) 454 intFreeList.io.freePhyReg(i) := RegNext(io.int_old_pdest(i)) 455 fpFreeList.io.freeReq(i) := RegNext(commitValid && (needDestRegCommit(Reg_F, io.robCommits.info(i)) || needDestRegCommit(Reg_V, io.robCommits.info(i)))) 456 fpFreeList.io.freePhyReg(i) := Mux(RegNext(needDestRegCommit(Reg_F, io.robCommits.info(i))), io.fp_old_pdest(i), io.vec_old_pdest(i)) 457 } 458 459 /* 460 Debug and performance counters 461 */ 462 def printRenameInfo(in: DecoupledIO[DecodedInst], out: DecoupledIO[DynInst]) = { 463 XSInfo(out.fire, p"pc:${Hexadecimal(in.bits.pc)} in(${in.valid},${in.ready}) " + 464 p"lsrc(0):${in.bits.lsrc(0)} -> psrc(0):${out.bits.psrc(0)} " + 465 p"lsrc(1):${in.bits.lsrc(1)} -> psrc(1):${out.bits.psrc(1)} " + 466 p"lsrc(2):${in.bits.lsrc(2)} -> psrc(2):${out.bits.psrc(2)} " + 467 p"ldest:${in.bits.ldest} -> pdest:${out.bits.pdest}\n" 468 ) 469 } 470 471 for ((x,y) <- io.in.zip(io.out)) { 472 printRenameInfo(x, y) 473 } 474 475 val debugRedirect = RegEnable(io.redirect.bits, io.redirect.valid) 476 // bad speculation 477 val recStall = io.redirect.valid || io.robCommits.isWalk 478 val ctrlRecStall = Mux(io.redirect.valid, io.redirect.bits.debugIsCtrl, io.robCommits.isWalk && debugRedirect.debugIsCtrl) 479 val mvioRecStall = Mux(io.redirect.valid, io.redirect.bits.debugIsMemVio, io.robCommits.isWalk && debugRedirect.debugIsMemVio) 480 val otherRecStall = recStall && !(ctrlRecStall || mvioRecStall) 481 XSPerfAccumulate("recovery_stall", recStall) 482 XSPerfAccumulate("control_recovery_stall", ctrlRecStall) 483 XSPerfAccumulate("mem_violation_recovery_stall", mvioRecStall) 484 XSPerfAccumulate("other_recovery_stall", otherRecStall) 485 // freelist stall 486 val notRecStall = !io.out.head.valid && !recStall 487 val intFlStall = notRecStall && inHeadValid && !intFreeList.io.canAllocate 488 val fpFlStall = notRecStall && inHeadValid && intFreeList.io.canAllocate && !fpFreeList.io.canAllocate 489 // other stall 490 val otherStall = notRecStall && !intFlStall && !fpFlStall 491 492 io.stallReason.in.backReason.valid := io.stallReason.out.backReason.valid || !io.in.head.ready 493 io.stallReason.in.backReason.bits := Mux(io.stallReason.out.backReason.valid, io.stallReason.out.backReason.bits, 494 MuxCase(TopDownCounters.OtherCoreStall.id.U, Seq( 495 ctrlRecStall -> TopDownCounters.ControlRecoveryStall.id.U, 496 mvioRecStall -> TopDownCounters.MemVioRecoveryStall.id.U, 497 otherRecStall -> TopDownCounters.OtherRecoveryStall.id.U, 498 intFlStall -> TopDownCounters.IntFlStall.id.U, 499 fpFlStall -> TopDownCounters.FpFlStall.id.U 500 ) 501 )) 502 io.stallReason.out.reason.zip(io.stallReason.in.reason).zip(io.in.map(_.valid)).foreach { case ((out, in), valid) => 503 out := Mux(io.stallReason.in.backReason.valid, io.stallReason.in.backReason.bits, in) 504 } 505 506 XSDebug(io.robCommits.isWalk, p"Walk Recovery Enabled\n") 507 XSDebug(io.robCommits.isWalk, p"validVec:${Binary(io.robCommits.walkValid.asUInt)}\n") 508 for (i <- 0 until CommitWidth) { 509 val info = io.robCommits.info(i) 510 XSDebug(io.robCommits.isWalk && io.robCommits.walkValid(i), p"[#$i walk info] pc:${Hexadecimal(info.pc)} " + 511 p"ldest:${info.ldest} rfWen:${info.rfWen} fpWen:${info.fpWen} vecWen:${info.vecWen}") 512 } 513 514 XSDebug(p"inValidVec: ${Binary(Cat(io.in.map(_.valid)))}\n") 515 516 XSPerfAccumulate("in_valid_count", PopCount(io.in.map(_.valid))) 517 XSPerfAccumulate("in_fire_count", PopCount(io.in.map(_.fire))) 518 XSPerfAccumulate("in_valid_not_ready_count", PopCount(io.in.map(x => x.valid && !x.ready))) 519 XSPerfAccumulate("wait_cycle", !io.in.head.valid && io.out.head.ready) 520 521 // These stall reasons could overlap each other, but we configure the priority as fellows. 522 // walk stall > dispatch stall > int freelist stall > fp freelist stall 523 private val inHeadStall = io.in.head match { case x => x.valid && !x.ready } 524 private val stallForWalk = inHeadValid && io.robCommits.isWalk 525 private val stallForDispatch = inHeadValid && !io.robCommits.isWalk && !io.out(0).ready 526 private val stallForIntFL = inHeadValid && !io.robCommits.isWalk && io.out(0).ready && !intFreeList.io.canAllocate 527 private val stallForFpFL = inHeadValid && !io.robCommits.isWalk && io.out(0).ready && intFreeList.io.canAllocate && !fpFreeList.io.canAllocate 528 XSPerfAccumulate("stall_cycle", inHeadStall) 529 XSPerfAccumulate("stall_cycle_walk", stallForWalk) 530 XSPerfAccumulate("stall_cycle_dispatch", stallForDispatch) 531 XSPerfAccumulate("stall_cycle_int", stallForIntFL) 532 XSPerfAccumulate("stall_cycle_fp", stallForFpFL) 533 534 XSPerfHistogram("in_valid_range", PopCount(io.in.map(_.valid)), true.B, 0, DecodeWidth + 1, 1) 535 XSPerfHistogram("in_fire_range", PopCount(io.in.map(_.fire)), true.B, 0, DecodeWidth + 1, 1) 536 XSPerfHistogram("out_valid_range", PopCount(io.out.map(_.valid)), true.B, 0, DecodeWidth + 1, 1) 537 XSPerfHistogram("out_fire_range", PopCount(io.out.map(_.fire)), true.B, 0, DecodeWidth + 1, 1) 538 539 XSPerfAccumulate("move_instr_count", PopCount(io.out.map(out => out.fire && out.bits.isMove))) 540 val is_fused_lui_load = io.out.map(o => o.fire && o.bits.fuType === FuType.ldu.U && o.bits.srcType(0) === SrcType.imm) 541 XSPerfAccumulate("fused_lui_load_instr_count", PopCount(is_fused_lui_load)) 542 543 val renamePerf = Seq( 544 ("rename_in ", PopCount(io.in.map(_.valid & io.in(0).ready )) ), 545 ("rename_waitinstr ", PopCount((0 until RenameWidth).map(i => io.in(i).valid && !io.in(i).ready)) ), 546 ("rename_stall ", inHeadStall), 547 ("rename_stall_cycle_walk ", inHeadValid && io.robCommits.isWalk), 548 ("rename_stall_cycle_dispatch", inHeadValid && !io.robCommits.isWalk && !io.out(0).ready), 549 ("rename_stall_cycle_int ", inHeadValid && !io.robCommits.isWalk && io.out(0).ready && !intFreeList.io.canAllocate), 550 ("rename_stall_cycle_fp ", inHeadValid && !io.robCommits.isWalk && io.out(0).ready && intFreeList.io.canAllocate && !fpFreeList.io.canAllocate), 551 ) 552 val intFlPerf = intFreeList.getPerfEvents 553 val fpFlPerf = fpFreeList.getPerfEvents 554 val perfEvents = renamePerf ++ intFlPerf ++ fpFlPerf 555 generatePerfEvent() 556} 557