/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.backend.rename

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import utils._
import xiangshan.backend.decode.{Imm_I, Imm_LUI_LOAD, Imm_U}
import xiangshan.backend.rob.RobPtr
import xiangshan.backend.rename.freelist._
import xiangshan.mem.mdp._

class Rename(implicit p: Parameters) extends XSModule with HasPerfEvents {
  val io = IO(new Bundle() {
    val redirect = Flipped(ValidIO(new Redirect))
    val robCommits = Flipped(new RobCommitIO)
    // from decode
    val in = Vec(RenameWidth, Flipped(DecoupledIO(new CfCtrl)))
    // ssit read result
    val ssit = Flipped(Vec(RenameWidth, Output(new SSITEntry)))
    // waittable read result
    val waittable = Flipped(Vec(RenameWidth, Output(Bool())))
    // to rename table
    val intReadPorts = Vec(RenameWidth, Vec(3, Input(UInt(PhyRegIdxWidth.W))))
    val fpReadPorts = Vec(RenameWidth, Vec(4, Input(UInt(PhyRegIdxWidth.W))))
    val intRenamePorts = Vec(RenameWidth, Output(new RatWritePort))
    val fpRenamePorts = Vec(RenameWidth, Output(new RatWritePort))
    // to dispatch1
    val out = Vec(RenameWidth, DecoupledIO(new MicroOp))
  })

  // create the free lists and the integer reference counter
  val intFreeList = Module(new MEFreeList(NRPhyRegs))
  val intRefCounter = Module(new RefCounter(NRPhyRegs))
  val fpFreeList = Module(new StdFreeList(NRPhyRegs - 32))

  // decide whether a given instruction needs to allocate a new physical register (CfCtrl: from decode; RobCommitInfo: from rob)
  def needDestReg[T <: CfCtrl](fp: Boolean, x: T): Bool = {
    if (fp) x.ctrl.fpWen else x.ctrl.rfWen && (x.ctrl.ldest =/= 0.U)
  }
  def needDestRegCommit[T <: RobCommitInfo](fp: Boolean, x: T): Bool = {
    if (fp) x.fpWen else x.rfWen
  }

  // connect [redirect + walk] ports for the __floating point__ & __integer__ free lists
  Seq((fpFreeList, true), (intFreeList, false)).foreach { case (fl, isFp) =>
    fl.io.redirect := io.redirect.valid
    fl.io.walk := io.robCommits.isWalk
    // when isWalk, use stepBack to restore the head pointer of the free list
    // (if ME enabled, stepBack of intFreeList should be useless thus optimized out)
    fl.io.stepBack := PopCount(io.robCommits.valid.zip(io.robCommits.info).map{ case (v, i) => v && needDestRegCommit(isFp, i) })
  }
  // walk has higher priority than allocation and thus we don't use isWalk here
  // allocation happens only when both free lists and dispatch1 have enough space
  intFreeList.io.doAllocate := fpFreeList.io.canAllocate && io.out(0).ready
  fpFreeList.io.doAllocate := intFreeList.io.canAllocate && io.out(0).ready

  // dispatch1 ready && floating point free list ready && int free list ready && not walk
  val canOut = io.out(0).ready && fpFreeList.io.canAllocate && intFreeList.io.canAllocate && !io.robCommits.isWalk

  // speculatively assign the instruction with an robIdx
  val validCount = PopCount(io.in.map(_.valid)) // number of instructions waiting to enter rob (from decode)
  val robIdxHead = RegInit(0.U.asTypeOf(new RobPtr))
  val lastCycleMisprediction = RegNext(io.redirect.valid && !io.redirect.bits.flushItself())
  val robIdxHeadNext = Mux(io.redirect.valid, io.redirect.bits.robIdx, // redirect: move ptr to given rob index
    Mux(lastCycleMisprediction, robIdxHead + 1.U, // mispredict: the instruction at robIdx is not flushed, so step past it
      Mux(canOut, robIdxHead + validCount, // instructions successfully entered next stage: increase robIdx
        /* default */ robIdxHead))) // no instructions passed by this cycle: stick to old value
  robIdxHead := robIdxHeadNext

  /**
    * Rename: allocate free physical register and update rename table
    */
  val uops = Wire(Vec(RenameWidth, new MicroOp))
  uops.foreach( uop => {
    uop.srcState(0) := DontCare
    uop.srcState(1) := DontCare
    uop.srcState(2) := DontCare
    uop.robIdx := DontCare
    uop.debugInfo := DontCare
    uop.lqIdx := DontCare
    uop.sqIdx := DontCare
  })

  val needFpDest = Wire(Vec(RenameWidth, Bool()))
  val needIntDest = Wire(Vec(RenameWidth, Bool()))
  val hasValid = Cat(io.in.map(_.valid)).orR

  val isMove = io.in.map(_.bits.ctrl.isMove)
  val intPsrc = Wire(Vec(RenameWidth, UInt()))

  val intSpecWen = Wire(Vec(RenameWidth, Bool()))
  val fpSpecWen = Wire(Vec(RenameWidth, Bool()))

  // uop calculation
  for (i <- 0 until RenameWidth) {
    uops(i).cf := io.in(i).bits.cf
    uops(i).ctrl := io.in(i).bits.ctrl

    // update cf according to ssit result
    uops(i).cf.storeSetHit := io.ssit(i).valid
    uops(i).cf.loadWaitStrict := io.ssit(i).strict && io.ssit(i).valid
    uops(i).cf.ssid := io.ssit(i).ssid

    // update cf according to waittable result
    uops(i).cf.loadWaitBit := io.waittable(i)

    val inValid = io.in(i).valid

    // alloc a new phy reg
    needFpDest(i) := inValid && needDestReg(fp = true, io.in(i).bits)
    needIntDest(i) := inValid && needDestReg(fp = false, io.in(i).bits)
    fpFreeList.io.allocateReq(i) := needFpDest(i)
    intFreeList.io.allocateReq(i) := needIntDest(i) && !isMove(i)

    // no valid instruction from decode stage || all resources (dispatch1 + both free lists) ready
    io.in(i).ready := !hasValid || canOut

    uops(i).robIdx := robIdxHead + PopCount(io.in.take(i).map(_.valid))

    val intPhySrcVec = io.intReadPorts(i).take(2)
    val intOldPdest = io.intReadPorts(i).last
    intPsrc(i) := intPhySrcVec(0)
    val fpPhySrcVec = io.fpReadPorts(i).take(3)
    val fpOldPdest = io.fpReadPorts(i).last
    uops(i).psrc(0) := Mux(uops(i).ctrl.srcType(0) === SrcType.reg, intPhySrcVec(0), fpPhySrcVec(0))
    uops(i).psrc(1) := Mux(uops(i).ctrl.srcType(1) === SrcType.reg, intPhySrcVec(1), fpPhySrcVec(1))
    uops(i).psrc(2) := fpPhySrcVec(2)
    uops(i).old_pdest := Mux(uops(i).ctrl.rfWen, intOldPdest, fpOldPdest)
    uops(i).eliminatedMove := isMove(i)

    // update pdest
    uops(i).pdest := Mux(needIntDest(i), intFreeList.io.allocatePhyReg(i), // normal int inst
      // normal fp inst
      Mux(needFpDest(i), fpFreeList.io.allocatePhyReg(i),
      /* default */ 0.U))

    // Assign performance counters
    uops(i).debugInfo.renameTime := GTimer()

    io.out(i).valid := io.in(i).valid && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && !io.robCommits.isWalk
    io.out(i).bits := uops(i)
    when (io.out(i).bits.ctrl.fuType === FuType.fence) {
      io.out(i).bits.ctrl.imm := Cat(io.in(i).bits.ctrl.lsrc(1), io.in(i).bits.ctrl.lsrc(0))
    }

    // write speculative rename table
    // we update rat later inside commit code
    intSpecWen(i) := needIntDest(i) && intFreeList.io.canAllocate && intFreeList.io.doAllocate && !io.robCommits.isWalk && !io.redirect.valid
    fpSpecWen(i) := needFpDest(i) && fpFreeList.io.canAllocate && fpFreeList.io.doAllocate && !io.robCommits.isWalk && !io.redirect.valid

    intRefCounter.io.allocate(i).valid := intSpecWen(i)
    intRefCounter.io.allocate(i).bits := io.out(i).bits.pdest
  }

  /**
    * How to set psrc:
    * - bypass the pdest to psrc if a previous instruction in the same group writes the same ldest as this lsrc
    * - default: psrc from RAT
    * How to set pdest:
    * - Mux(isMove, psrc, pdest_from_freelist).
    *
    * The critical path of rename lies here:
    * When move elimination is enabled, we need to update the rat with psrc.
    * However, psrc may come from a previous instruction's pdest, which itself comes from the free list.
    *
    * If we expand this logic for pdest(N):
    * pdest(N) = Mux(isMove(N), psrc(N), freelist_out(N))
    *          = Mux(isMove(N), Mux(bypass(N, N - 1), pdest(N - 1),
    *                           Mux(bypass(N, N - 2), pdest(N - 2),
    *                           ...
    *                           Mux(bypass(N, 0), pdest(0),
    *                               rat_out(N))...)),
    *                           freelist_out(N))
    */
  // a simple functional model for now
  io.out(0).bits.pdest := Mux(isMove(0), uops(0).psrc.head, uops(0).pdest)
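  // In-group bypass conditions. bypassCond(k) holds one UInt per younger slot i (1 <= i < RenameWidth);
  // bit j of bypassCond(k)(i - 1) is set when older slot j in the same rename group allocates a
  // destination of the matching register class and its ldest equals the logical register that slot i
  // uses as operand k (k = 0/1/2 for the three sources, k = 3 for ldest, i.e. the old_pdest lookup).
  // The foldLefts below start from the RAT read result and let the youngest matching older slot win.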
  val bypassCond = Wire(Vec(4, MixedVec(List.tabulate(RenameWidth-1)(i => UInt((i+1).W)))))
  for (i <- 1 until RenameWidth) {
    val fpCond = io.in(i).bits.ctrl.srcType.map(_ === SrcType.fp) :+ needFpDest(i)
    val intCond = io.in(i).bits.ctrl.srcType.map(_ === SrcType.reg) :+ needIntDest(i)
    val target = io.in(i).bits.ctrl.lsrc :+ io.in(i).bits.ctrl.ldest
    for ((((cond1, cond2), t), j) <- fpCond.zip(intCond).zip(target).zipWithIndex) {
      val destToSrc = io.in.take(i).zipWithIndex.map { case (in, j) =>
        val indexMatch = in.bits.ctrl.ldest === t
        val writeMatch = cond2 && needIntDest(j) || cond1 && needFpDest(j)
        indexMatch && writeMatch
      }
      bypassCond(j)(i - 1) := VecInit(destToSrc).asUInt
    }
    io.out(i).bits.psrc(0) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(0)(i-1).asBools).foldLeft(uops(i).psrc(0)) {
      (z, next) => Mux(next._2, next._1, z)
    }
    io.out(i).bits.psrc(1) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(1)(i-1).asBools).foldLeft(uops(i).psrc(1)) {
      (z, next) => Mux(next._2, next._1, z)
    }
    io.out(i).bits.psrc(2) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(2)(i-1).asBools).foldLeft(uops(i).psrc(2)) {
      (z, next) => Mux(next._2, next._1, z)
    }
    io.out(i).bits.old_pdest := io.out.take(i).map(_.bits.pdest).zip(bypassCond(3)(i-1).asBools).foldLeft(uops(i).old_pdest) {
      (z, next) => Mux(next._2, next._1, z)
    }
    io.out(i).bits.pdest := Mux(isMove(i), io.out(i).bits.psrc(0), uops(i).pdest)

    // For fused-lui-load, load.src(0) is replaced by the imm.
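    // Fusion applies when the previous slot is a LUI (IMM_U with a non-pc first operand, i.e. not AUIPC)
    // whose ldest provides this slot's base-address register, and this slot is a normal (non-prefetch) load.
    // The low LUI bits are merged with the load offset into ctrl.imm; the upper LUI bits that do not fit
    // there are stashed in psrc(0)/psrc(1), which the load no longer needs as register sources
    // (the require below checks that they fit).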
    val last_is_lui = io.in(i - 1).bits.ctrl.selImm === SelImm.IMM_U && io.in(i - 1).bits.ctrl.srcType(0) =/= SrcType.pc
    val this_is_load = io.in(i).bits.ctrl.fuType === FuType.ldu && !LSUOpType.isPrefetch(io.in(i).bits.ctrl.fuOpType)
    val lui_to_load = io.in(i - 1).valid && io.in(i - 1).bits.ctrl.ldest === io.in(i).bits.ctrl.lsrc(0)
    val fused_lui_load = last_is_lui && this_is_load && lui_to_load
    when (fused_lui_load) {
      // The first LOAD operand (base address) is replaced by LUI-imm and stored in {psrc, imm}
      val lui_imm = io.in(i - 1).bits.ctrl.imm
      val ld_imm = io.in(i).bits.ctrl.imm
      io.out(i).bits.ctrl.srcType(0) := SrcType.imm
      io.out(i).bits.ctrl.imm := Imm_LUI_LOAD().immFromLuiLoad(lui_imm, ld_imm)
      val psrcWidth = uops(i).psrc.head.getWidth
      val lui_imm_in_imm = uops(i).ctrl.imm.getWidth - Imm_I().len
      val left_lui_imm = Imm_U().len - lui_imm_in_imm
      require(2 * psrcWidth >= left_lui_imm, "cannot fuse lui and load with psrc")
      io.out(i).bits.psrc(0) := lui_imm(lui_imm_in_imm + psrcWidth - 1, lui_imm_in_imm)
      io.out(i).bits.psrc(1) := lui_imm(lui_imm.getWidth - 1, lui_imm_in_imm + psrcWidth)
    }

  }

  /**
    * Instructions commit: update freelist and rename table
    */
  for (i <- 0 until CommitWidth) {

    Seq((io.intRenamePorts, false), (io.fpRenamePorts, true)) foreach { case (rat, fp) =>
      // valid commit request and the instruction has a destination register
      val commitDestValid = io.robCommits.valid(i) && needDestRegCommit(fp, io.robCommits.info(i))
      XSDebug(p"isFp[${fp}]index[$i]-commitDestValid:$commitDestValid,isWalk:${io.robCommits.isWalk}\n")

      /*
      I. RAT Update
      */

      // walk back write - restore spec state : ldest => old_pdest
      if (fp && i < RenameWidth) {
        // When redirect happens (mis-prediction), don't update the rename table
        rat(i).wen := fpSpecWen(i)
        rat(i).addr := uops(i).ctrl.ldest
        rat(i).data := fpFreeList.io.allocatePhyReg(i)
      } else if (!fp && i < RenameWidth) {
        rat(i).wen := intSpecWen(i)
        rat(i).addr := uops(i).ctrl.ldest
        rat(i).data := io.out(i).bits.pdest
      }

      /*
      II. Free List Update
      */
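      // With move elimination an integer physical register may back several logical registers, so the
      // integer free list is refilled from RefCounter, which reports a register as free only once it is
      // no longer referenced, instead of returning old_pdest directly at commit as the fp list does.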
      if (fp) { // floating point free list
        fpFreeList.io.freeReq(i) := commitDestValid && !io.robCommits.isWalk
        fpFreeList.io.freePhyReg(i) := io.robCommits.info(i).old_pdest
      } else { // integer free list
        intFreeList.io.freeReq(i) := intRefCounter.io.freeRegs(i).valid
        intFreeList.io.freePhyReg(i) := intRefCounter.io.freeRegs(i).bits
      }
    }
    intRefCounter.io.deallocate(i).valid := io.robCommits.valid(i) && needDestRegCommit(false, io.robCommits.info(i))
    intRefCounter.io.deallocate(i).bits := Mux(io.robCommits.isWalk, io.robCommits.info(i).pdest, io.robCommits.info(i).old_pdest)
  }

  /*
  Debug and performance counters
  */
  def printRenameInfo(in: DecoupledIO[CfCtrl], out: DecoupledIO[MicroOp]) = {
    XSInfo(out.fire, p"pc:${Hexadecimal(in.bits.cf.pc)} in(${in.valid},${in.ready}) " +
      p"lsrc(0):${in.bits.ctrl.lsrc(0)} -> psrc(0):${out.bits.psrc(0)} " +
      p"lsrc(1):${in.bits.ctrl.lsrc(1)} -> psrc(1):${out.bits.psrc(1)} " +
      p"lsrc(2):${in.bits.ctrl.lsrc(2)} -> psrc(2):${out.bits.psrc(2)} " +
      p"ldest:${in.bits.ctrl.ldest} -> pdest:${out.bits.pdest} " +
      p"old_pdest:${out.bits.old_pdest}\n"
    )
  }

  for ((x, y) <- io.in.zip(io.out)) {
    printRenameInfo(x, y)
  }

  XSDebug(io.robCommits.isWalk, p"Walk Recovery Enabled\n")
  XSDebug(io.robCommits.isWalk, p"validVec:${Binary(io.robCommits.valid.asUInt)}\n")
  for (i <- 0 until CommitWidth) {
    val info = io.robCommits.info(i)
    XSDebug(io.robCommits.isWalk && io.robCommits.valid(i), p"[#$i walk info] pc:${Hexadecimal(info.pc)} " +
      p"ldest:${info.ldest} rfWen:${info.rfWen} fpWen:${info.fpWen} " +
      p"pdest:${info.pdest} old_pdest:${info.old_pdest}\n")
  }

  XSDebug(p"inValidVec: ${Binary(Cat(io.in.map(_.valid)))}\n")

  XSPerfAccumulate("in", Mux(RegNext(io.in(0).ready), PopCount(io.in.map(_.valid)), 0.U))
  XSPerfAccumulate("utilization", PopCount(io.in.map(_.valid)))
  XSPerfAccumulate("waitInstr", PopCount((0 until RenameWidth).map(i => io.in(i).valid && !io.in(i).ready)))
  XSPerfAccumulate("stall_cycle_dispatch", hasValid && !io.out(0).ready && fpFreeList.io.canAllocate && intFreeList.io.canAllocate && !io.robCommits.isWalk)
  XSPerfAccumulate("stall_cycle_fp", hasValid && io.out(0).ready && !fpFreeList.io.canAllocate && intFreeList.io.canAllocate && !io.robCommits.isWalk)
  XSPerfAccumulate("stall_cycle_int", hasValid && io.out(0).ready && fpFreeList.io.canAllocate && !intFreeList.io.canAllocate && !io.robCommits.isWalk)
  XSPerfAccumulate("stall_cycle_walk", hasValid && io.out(0).ready && fpFreeList.io.canAllocate && intFreeList.io.canAllocate && io.robCommits.isWalk)

  XSPerfAccumulate("move_instr_count", PopCount(io.out.map(out => out.fire() && out.bits.ctrl.isMove)))
  val is_fused_lui_load = io.out.map(o => o.fire() && o.bits.ctrl.fuType === FuType.ldu && o.bits.ctrl.srcType(0) === SrcType.imm)
  XSPerfAccumulate("fused_lui_load_instr_count", PopCount(is_fused_lui_load))

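  // Performance events exported through the HasPerfEvents interface; perfEvents below is consumed
  // by generatePerfEvent(), together with the events reported by the two free lists.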
  val renamePerf = Seq(
    ("rename_in                  ", PopCount(io.in.map(_.valid & io.in(0).ready))                                                                   ),
    ("rename_waitinstr           ", PopCount((0 until RenameWidth).map(i => io.in(i).valid && !io.in(i).ready))                                     ),
    ("rename_stall_cycle_dispatch", hasValid && !io.out(0).ready && fpFreeList.io.canAllocate && intFreeList.io.canAllocate && !io.robCommits.isWalk),
    ("rename_stall_cycle_fp      ", hasValid && io.out(0).ready && !fpFreeList.io.canAllocate && intFreeList.io.canAllocate && !io.robCommits.isWalk),
    ("rename_stall_cycle_int     ", hasValid && io.out(0).ready && fpFreeList.io.canAllocate && !intFreeList.io.canAllocate && !io.robCommits.isWalk),
    ("rename_stall_cycle_walk    ", hasValid && io.out(0).ready && fpFreeList.io.canAllocate && intFreeList.io.canAllocate && io.robCommits.isWalk  )
  )
  val intFlPerf = intFreeList.getPerfEvents
  val fpFlPerf = fpFreeList.getPerfEvents
  val perfEvents = renamePerf ++ intFlPerf ++ fpFlPerf
  generatePerfEvent()
}