/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.backend

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
import utility._
import utils._
import xiangshan.ExceptionNO._
import xiangshan._
import xiangshan.backend.Bundles.{DecodedInst, DynInst, ExceptionInfo, ExuOutput}
import xiangshan.backend.ctrlblock.{DebugLSIO, DebugLsInfoBundle, LsTopdownInfo, MemCtrl, RedirectGenerator}
import xiangshan.backend.datapath.DataConfig.VAddrData
import xiangshan.backend.decode.{DecodeStage, FusionDecoder}
import xiangshan.backend.dispatch.{CoreDispatchTopDownIO, Dispatch, DispatchQueue}
import xiangshan.backend.fu.PFEvent
import xiangshan.backend.fu.vector.Bundles.VType
import xiangshan.backend.rename.{Rename, RenameTableWrapper, SnapshotGenerator}
import xiangshan.backend.rob.{Rob, RobCSRIO, RobCoreTopDownIO, RobDebugRollingIO, RobLsqIO, RobPtr}
import xiangshan.frontend.{FtqPtr, FtqRead, Ftq_RF_Components}
import xiangshan.mem.{LqPtr, LsqEnqIO}

class CtrlToFtqIO(implicit p: Parameters) extends XSBundle {
  val rob_commits = Vec(CommitWidth, Valid(new RobCommitInfo))
  val redirect = Valid(new Redirect)
  val ftqIdxAhead = Vec(BackendRedirectNum, Valid(new FtqPtr))
  val ftqIdxSelOH = Valid(UInt(BackendRedirectNum.W))
}

class CtrlBlock(params: BackendParams)(implicit p: Parameters) extends LazyModule {
  override def shouldBeInlined: Boolean = false

  val rob = LazyModule(new Rob(params))

  lazy val module = new CtrlBlockImp(this)(p, params)
}

class CtrlBlockImp(
  override val wrapper: CtrlBlock
)(implicit
  p: Parameters,
  params: BackendParams
) extends LazyModuleImp(wrapper)
  with HasXSParameter
  with HasCircularQueuePtrHelper
  with HasPerfEvents
{
  val pcMemRdIndexes = new NamedIndexes(Seq(
    "exu"      -> params.numPcReadPort,
    "redirect" -> 1,
    "memPred"  -> 1,
    "robFlush" -> 1,
    "load"     -> params.LduCnt,
    "hybrid"   -> params.HyuCnt,
    "store"    -> (if (EnableStorePrefetchSMS) params.StaCnt else 0)
  ))
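
  // Illustrative note (not part of the design): NamedIndexes hands out a
  // contiguous, non-overlapping index range per name, in declaration order.
  // With hypothetical counts numPcReadPort = 2, LduCnt = 2, HyuCnt = 1 and
  // store prefetch disabled, the read ports would be laid out as:
  //   "exu" -> 0..1, "redirect" -> 2, "memPred" -> 3, "robFlush" -> 4,
  //   "load" -> 5..6, "hybrid" -> 7 (maxIdx = 8, "store" gets no ports)
  // A usage sketch of NamedIndexes itself is given at the end of this file.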

  private val numPcMemReadForExu = params.numPcReadPort
  private val numPcMemRead = pcMemRdIndexes.maxIdx

  println(s"pcMem read num: $numPcMemRead")
  println(s"pcMem read num for exu: $numPcMemReadForExu")

  val io = IO(new CtrlBlockIO())

  val decode = Module(new DecodeStage)
  val fusionDecoder = Module(new FusionDecoder)
  val rat = Module(new RenameTableWrapper)
  val rename = Module(new Rename)
  val dispatch = Module(new Dispatch)
  val intDq = Module(new DispatchQueue(dpParams.IntDqSize, RenameWidth, dpParams.IntDqDeqWidth))
  val fpDq = Module(new DispatchQueue(dpParams.FpDqSize, RenameWidth, dpParams.FpDqDeqWidth))
  val lsDq = Module(new DispatchQueue(dpParams.LsDqSize, RenameWidth, dpParams.LsDqDeqWidth))
  val redirectGen = Module(new RedirectGenerator)
  private val pcMem = Module(new SyncDataModuleTemplate(new Ftq_RF_Components, FtqSize, numPcMemRead, 1, "BackendPC"))
  private val rob = wrapper.rob.module
  private val memCtrl = Module(new MemCtrl(params))

  private val disableFusion = decode.io.csrCtrl.singlestep || !decode.io.csrCtrl.fusion_enable

  private val s0_robFlushRedirect = rob.io.flushOut
  private val s1_robFlushRedirect = Wire(Valid(new Redirect))
  s1_robFlushRedirect.valid := RegNext(s0_robFlushRedirect.valid)
  s1_robFlushRedirect.bits := RegEnable(s0_robFlushRedirect.bits, s0_robFlushRedirect.valid)

  pcMem.io.raddr(pcMemRdIndexes("robFlush").head) := s0_robFlushRedirect.bits.ftqIdx.value
  private val s1_robFlushPc = pcMem.io.rdata(pcMemRdIndexes("robFlush").head).getPc(RegEnable(s0_robFlushRedirect.bits.ftqOffset, s0_robFlushRedirect.valid))
  private val s3_redirectGen = redirectGen.io.stage2Redirect
  private val s1_s3_redirect = Mux(s1_robFlushRedirect.valid, s1_robFlushRedirect, s3_redirectGen)
  private val s2_s4_pendingRedirectValid = RegInit(false.B)
  when (s1_s3_redirect.valid) {
    s2_s4_pendingRedirectValid := true.B
  }.elsewhen (RegNext(io.frontend.toFtq.redirect.valid)) {
    s2_s4_pendingRedirectValid := false.B
  }

  // Redirect will be RegNext at ExuBlocks and IssueBlocks
  val s2_s4_redirect = RegNextWithEnable(s1_s3_redirect)
  val s3_s5_redirect = RegNextWithEnable(s2_s4_redirect)

  private val delayedNotFlushedWriteBack = io.fromWB.wbData.map(x => {
    val valid = x.valid
    val killedByOlder = x.bits.robIdx.needFlush(Seq(s1_s3_redirect, s2_s4_redirect, s3_s5_redirect))
    val delayed = Wire(Valid(new ExuOutput(x.bits.params)))
    delayed.valid := RegNext(valid && !killedByOlder)
    delayed.bits := RegEnable(x.bits, x.valid)
    delayed.bits.debugInfo.writebackTime := GTimer()
    delayed
  }).toSeq

  private val exuPredecode = VecInit(
    delayedNotFlushedWriteBack.filter(_.bits.redirect.nonEmpty).map(x => x.bits.predecodeInfo.get).toSeq
  )

  private val exuRedirects: Seq[ValidIO[Redirect]] = delayedNotFlushedWriteBack.filter(_.bits.redirect.nonEmpty).map(x => {
    val out = Wire(Valid(new Redirect()))
    out.valid := x.valid && x.bits.redirect.get.valid && x.bits.redirect.get.bits.cfiUpdate.isMisPred
    out.bits := x.bits.redirect.get.bits
    out.bits.debugIsCtrl := true.B
    out.bits.debugIsMemVio := false.B
    out
  }).toSeq

  private val memViolation = io.fromMem.violation
  val loadReplay = Wire(ValidIO(new Redirect))
  loadReplay.valid := RegNext(memViolation.valid &&
    !memViolation.bits.robIdx.needFlush(Seq(s1_s3_redirect, s2_s4_redirect))
  )
  loadReplay.bits := RegEnable(memViolation.bits, memViolation.valid)
  loadReplay.bits.debugIsCtrl := false.B
  loadReplay.bits.debugIsMemVio := true.B

  val pdestReverse = rob.io.commits.info.map(info => info.pdest).reverse

  pcMem.io.raddr(pcMemRdIndexes("redirect").head) := redirectGen.io.redirectPcRead.ptr.value
  redirectGen.io.redirectPcRead.data := pcMem.io.rdata(pcMemRdIndexes("redirect").head).getPc(RegNext(redirectGen.io.redirectPcRead.offset))
  pcMem.io.raddr(pcMemRdIndexes("memPred").head) := redirectGen.io.memPredPcRead.ptr.value
  redirectGen.io.memPredPcRead.data := pcMem.io.rdata(pcMemRdIndexes("memPred").head).getPc(RegNext(redirectGen.io.memPredPcRead.offset))
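
  // Note on the read pattern above and below (informal, inferred from
  // SyncDataModuleTemplate being a synchronous-read memory): the FTQ entry
  // index drives raddr in cycle s0 and rdata becomes valid in cycle s1, so the
  // intra-entry offset handed to getPc must be delayed one cycle to line up:
  //   pcMem.io.raddr(idx) := read.ptr.value                        // s0
  //   read.data := pcMem.io.rdata(idx).getPc(RegNext(read.offset)) // s1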

  for ((pcMemIdx, i) <- pcMemRdIndexes("load").zipWithIndex) {
    // load read pcMem (s0) -> get rdata (s1) -> reg next in Memblock (s2) -> reg next in Memblock (s3) -> consumed by pf (s3)
    pcMem.io.raddr(pcMemIdx) := io.memLdPcRead(i).ptr.value
    io.memLdPcRead(i).data := pcMem.io.rdata(pcMemIdx).getPc(RegNext(io.memLdPcRead(i).offset))
  }

  for ((pcMemIdx, i) <- pcMemRdIndexes("hybrid").zipWithIndex) {
    // hybrid read pcMem (s0) -> get rdata (s1) -> reg next in Memblock (s2) -> reg next in Memblock (s3) -> consumed by pf (s3)
    pcMem.io.raddr(pcMemIdx) := io.memHyPcRead(i).ptr.value
    io.memHyPcRead(i).data := pcMem.io.rdata(pcMemIdx).getPc(RegNext(io.memHyPcRead(i).offset))
  }

  if (EnableStorePrefetchSMS) {
    for ((pcMemIdx, i) <- pcMemRdIndexes("store").zipWithIndex) {
      pcMem.io.raddr(pcMemIdx) := io.memStPcRead(i).ptr.value
      io.memStPcRead(i).data := pcMem.io.rdata(pcMemIdx).getPc(RegNext(io.memStPcRead(i).offset))
    }
  } else {
    io.memStPcRead.foreach(_.data := 0.U)
  }

  redirectGen.io.hartId := io.fromTop.hartId
  redirectGen.io.exuRedirect := exuRedirects.toSeq
  redirectGen.io.exuOutPredecode := exuPredecode // guarded by exuRedirect.valid
  redirectGen.io.loadReplay <> loadReplay

  redirectGen.io.robFlush := s1_robFlushRedirect.valid
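
  // Timing note (informal): the ROB flush seen at s1 is stretched by
  // DelayN(_, 4) to s5 and registered once more to s6, so that the frontend
  // only sees the redirect after the CSR side has produced the trap target
  // (see the T0..T6 timeline further below).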

  val s5_flushFromRobValidAhead = DelayN(s1_robFlushRedirect.valid, 4)
  val s6_flushFromRobValid = RegNext(s5_flushFromRobValidAhead)
  val frontendFlushBits = RegEnable(s1_robFlushRedirect.bits, s1_robFlushRedirect.valid) // ??
  // When the ROB commits an instruction with a flush, we notify the frontend of the flush without the commit.
  // The flush sent to the frontend may be delayed by several cycles; if the commit arrived before the flush,
  // it would cause errors. Thus, we make all flush reasons behave the same as exceptions for the frontend.
  for (i <- 0 until CommitWidth) {
    // why flushOut: instructions with flushPipe are not committed to the frontend.
    // If we committed them, it would cause a flush after commit, which the frontend cannot accept.
    val s1_isCommit = rob.io.commits.commitValid(i) && rob.io.commits.isCommit && !s0_robFlushRedirect.valid
    io.frontend.toFtq.rob_commits(i).valid := RegNext(s1_isCommit)
    io.frontend.toFtq.rob_commits(i).bits := RegEnable(rob.io.commits.info(i), s1_isCommit)
  }
  io.frontend.toFtq.redirect.valid := s6_flushFromRobValid || s3_redirectGen.valid
  io.frontend.toFtq.redirect.bits := Mux(s6_flushFromRobValid, frontendFlushBits, s3_redirectGen.bits)
  io.frontend.toFtq.ftqIdxSelOH.valid := s6_flushFromRobValid || redirectGen.io.stage2Redirect.valid
  io.frontend.toFtq.ftqIdxSelOH.bits := Cat(s6_flushFromRobValid, redirectGen.io.stage2oldestOH & Fill(NumRedirect + 1, !s6_flushFromRobValid))

  // jmp/brh
  for (i <- 0 until NumRedirect) {
    io.frontend.toFtq.ftqIdxAhead(i).valid := exuRedirects(i).valid && exuRedirects(i).bits.cfiUpdate.isMisPred && !s1_robFlushRedirect.valid && !s5_flushFromRobValidAhead
    io.frontend.toFtq.ftqIdxAhead(i).bits := exuRedirects(i).bits.ftqIdx
  }
  // loadreplay
  io.frontend.toFtq.ftqIdxAhead(NumRedirect).valid := loadReplay.valid && !s1_robFlushRedirect.valid && !s5_flushFromRobValidAhead
  io.frontend.toFtq.ftqIdxAhead(NumRedirect).bits := loadReplay.bits.ftqIdx
  // exception
  io.frontend.toFtq.ftqIdxAhead.last.valid := s5_flushFromRobValidAhead
  io.frontend.toFtq.ftqIdxAhead.last.bits := frontendFlushBits.ftqIdx
  // Be careful here:
  // T0: rob.io.flushOut, s0_robFlushRedirect
  // T1: s1_robFlushRedirect, rob.io.exception.valid
  // T2: csr.redirect.valid
  // T3: csr.exception.valid
  // T4: csr.trapTarget
  // T5: ctrlBlock.trapTarget
  // T6: io.frontend.toFtq.stage2Redirect.valid
  val s2_robFlushPc = RegEnable(Mux(s1_robFlushRedirect.bits.flushItself(),
    s1_robFlushPc, // replay inst
    s1_robFlushPc + Mux(s1_robFlushRedirect.bits.isRVC, 2.U, 4.U) // flush pipe
  ), s1_robFlushRedirect.valid)
  private val s2_csrIsXRet = io.robio.csr.isXRet
  private val s5_csrIsTrap = DelayN(rob.io.exception.valid, 4)
  private val s2_s5_trapTargetFromCsr = io.robio.csr.trapTarget

  val flushTarget = Mux(s2_csrIsXRet || s5_csrIsTrap, s2_s5_trapTargetFromCsr, s2_robFlushPc)
  when (s6_flushFromRobValid) {
    io.frontend.toFtq.redirect.bits.level := RedirectLevel.flush
    io.frontend.toFtq.redirect.bits.cfiUpdate.target := RegEnable(flushTarget, s5_flushFromRobValidAhead)
  }

  // vtype commit
  decode.io.commitVType.bits := io.fromDataPath.vtype
  decode.io.commitVType.valid := RegNext(rob.io.isVsetFlushPipe)

  io.toDataPath.vtypeAddr := rob.io.vconfigPdest

  decode.io.walkVType := rob.io.toDecode.vtype

  decode.io.redirect := s1_s3_redirect.valid || s2_s4_pendingRedirectValid

  decode.io.in.zip(io.frontend.cfVec).foreach { case (decodeIn, frontendCf) =>
    decodeIn.valid := frontendCf.valid
    frontendCf.ready := decodeIn.ready
    decodeIn.bits.connectCtrlFlow(frontendCf.bits)
  }
  decode.io.csrCtrl := RegNext(io.csrCtrl)
  decode.io.intRat <> rat.io.intReadPorts
  decode.io.fpRat <> rat.io.fpReadPorts
  decode.io.vecRat <> rat.io.vecReadPorts
  decode.io.fusion := 0.U.asTypeOf(decode.io.fusion) // Todo
  decode.io.stallReason.in <> io.frontend.stallReason
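
  // Rename snapshot check (informal summary): a snapshot of the rename state
  // is taken whenever a renamed group carrying the snapshot hint leaves Rename.
  // On a redirect, if some valid snapshot sits at or before the redirected
  // instruction (the robIdx comparisons below), Rename/RAT/ROB restore from it
  // instead of walking, and snapshots younger than the redirect are flushed
  // via flushVec.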

  // snapshot check
  class CFIRobIdx extends Bundle {
    val robIdx = Vec(RenameWidth, new RobPtr)
    val isCFI = Vec(RenameWidth, Bool())
  }
  val genSnapshot = Cat(rename.io.out.map(out => out.fire && out.bits.snapshot)).orR
  val snpt = Module(new SnapshotGenerator(0.U.asTypeOf(new CFIRobIdx)))
  snpt.io.enq := genSnapshot
  snpt.io.enqData.robIdx := rename.io.out.map(_.bits.robIdx)
  snpt.io.enqData.isCFI := rename.io.out.map(_.bits.snapshot)
  snpt.io.deq := snpt.io.valids(snpt.io.deqPtr.value) && rob.io.commits.isCommit &&
    Cat(rob.io.commits.commitValid.zip(rob.io.commits.robIdx).map(x => x._1 && x._2 === snpt.io.snapshots(snpt.io.deqPtr.value).robIdx.head)).orR
  snpt.io.redirect := s1_s3_redirect.valid
  val flushVec = VecInit(snpt.io.snapshots.map { snapshot =>
    val notCFIMask = snapshot.isCFI.map(~_)
    val shouldFlushMask = snapshot.robIdx.map(snptRobIdx => snptRobIdx >= s1_s3_redirect.bits.robIdx || isFull(snptRobIdx, s1_s3_redirect.bits.robIdx))
    val realShouldFlush = (1 to RenameWidth).map(i => Cat(shouldFlushMask.take(i)).orR)
    s1_s3_redirect.valid && Cat(realShouldFlush.zip(notCFIMask).map(x => x._1 | x._2)).andR
  })
  val flushVecNext = RegNext(flushVec, 0.U.asTypeOf(flushVec))
  snpt.io.flushVec := flushVecNext

  val useSnpt = VecInit.tabulate(RenameSnapshotNum)(idx =>
    snpt.io.valids(idx) && s1_s3_redirect.bits.robIdx >= snpt.io.snapshots(idx).robIdx.head
  ).reduceTree(_ || _)
  val snptSelect = MuxCase(
    0.U(log2Ceil(RenameSnapshotNum).W),
    (1 to RenameSnapshotNum).map(i => (snpt.io.enqPtr - i.U).value).map(idx =>
      (snpt.io.valids(idx) && s1_s3_redirect.bits.robIdx >= snpt.io.snapshots(idx).robIdx.head, idx)
    )
  )

  rob.io.snpt.snptEnq := DontCare
  rob.io.snpt.snptDeq := snpt.io.deq
  rob.io.snpt.useSnpt := useSnpt
  rob.io.snpt.snptSelect := snptSelect
  rob.io.snpt.flushVec := flushVecNext
  rat.io.snpt.snptEnq := genSnapshot
  rat.io.snpt.snptDeq := snpt.io.deq
  rat.io.snpt.useSnpt := useSnpt
  rat.io.snpt.snptSelect := snptSelect
  rat.io.snpt.flushVec := flushVec

  val decodeHasException = decode.io.out.map(x => x.bits.exceptionVec(instrPageFault) || x.bits.exceptionVec(instrAccessFault))
  // fusion decoder
  for (i <- 0 until DecodeWidth) {
    fusionDecoder.io.in(i).valid := decode.io.out(i).valid && !(decodeHasException(i) || disableFusion)
    fusionDecoder.io.in(i).bits := decode.io.out(i).bits.instr
    if (i > 0) {
      fusionDecoder.io.inReady(i - 1) := decode.io.out(i).ready
    }
  }

  private val decodePipeRename = Wire(Vec(RenameWidth, DecoupledIO(new DecodedInst)))

  for (i <- 0 until RenameWidth) {
    PipelineConnect(decode.io.out(i), decodePipeRename(i), rename.io.in(i).ready,
      s1_s3_redirect.valid || s2_s4_pendingRedirectValid, moduleName = Some("decodePipeRenameModule"))

    decodePipeRename(i).ready := rename.io.in(i).ready
    rename.io.in(i).valid := decodePipeRename(i).valid && !fusionDecoder.io.clear(i)
    rename.io.in(i).bits := decodePipeRename(i).bits
  }

  for (i <- 0 until RenameWidth - 1) {
    fusionDecoder.io.dec(i) := decodePipeRename(i).bits
    rename.io.fusionInfo(i) := fusionDecoder.io.info(i)

    // update the first RenameWidth - 1 instructions
    decode.io.fusion(i) := fusionDecoder.io.out(i).valid && rename.io.out(i).fire
    when (fusionDecoder.io.out(i).valid) {
      fusionDecoder.io.out(i).bits.update(rename.io.in(i).bits)
      // TODO: remove this dirty code for ftq update
      val sameFtqPtr = rename.io.in(i).bits.ftqPtr.value === rename.io.in(i + 1).bits.ftqPtr.value
      val ftqOffset0 = rename.io.in(i).bits.ftqOffset
      val ftqOffset1 = rename.io.in(i + 1).bits.ftqOffset
      val ftqOffsetDiff = ftqOffset1 - ftqOffset0
      val cond1 = sameFtqPtr && ftqOffsetDiff === 1.U
      val cond2 = sameFtqPtr && ftqOffsetDiff === 2.U
      val cond3 = !sameFtqPtr && ftqOffset1 === 0.U
      val cond4 = !sameFtqPtr && ftqOffset1 === 1.U
      rename.io.in(i).bits.commitType := Mux(cond1, 4.U, Mux(cond2, 5.U, Mux(cond3, 6.U, 7.U)))
      XSError(!cond1 && !cond2 && !cond3 && !cond4, p"new condition $sameFtqPtr $ftqOffset0 $ftqOffset1\n")
    }
  }
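
  // The memory dependency predictor below is indexed with a "folded" PC: the
  // full PC XOR-compressed down to MemPredPCWidth bits so its tables stay
  // small. foldpc itself is computed in the frontend; a minimal sketch of the
  // idea (illustrative only, not the exact helper XiangShan uses):
  //   def foldPc(pc: UInt, width: Int): UInt =
  //     pc.asBools.grouped(width).map(g => VecInit(g).asUInt).reduce(_ ^ _)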

  // memory dependency prediction
  // at decode, send the folded PC to the MDP
  private val mdpFlodPcVec = Wire(Vec(DecodeWidth, UInt(MemPredPCWidth.W)))
  for (i <- 0 until DecodeWidth) {
    mdpFlodPcVec(i) := Mux(
      decode.io.out(i).fire,
      decode.io.in(i).bits.foldpc,
      rename.io.in(i).bits.foldpc
    )
  }

  // currently, we only update mdp info when isReplay
  memCtrl.io.redirect := s1_s3_redirect
  memCtrl.io.csrCtrl := io.csrCtrl // RegNext in memCtrl
  memCtrl.io.stIn := io.fromMem.stIn // RegNext in memCtrl
  memCtrl.io.memPredUpdate := redirectGen.io.memPredUpdate // RegNext in memCtrl
  memCtrl.io.mdpFlodPcVec := mdpFlodPcVec
  memCtrl.io.dispatchLFSTio <> dispatch.io.lfst

  rat.io.redirect := s1_s3_redirect.valid
  rat.io.robCommits := rob.io.rabCommits
  rat.io.diffCommits := rob.io.diffCommits
  rat.io.intRenamePorts := rename.io.intRenamePorts
  rat.io.fpRenamePorts := rename.io.fpRenamePorts
  rat.io.vecRenamePorts := rename.io.vecRenamePorts

  rename.io.redirect := s1_s3_redirect
  rename.io.robCommits <> rob.io.rabCommits
  rename.io.waittable := (memCtrl.io.waitTable2Rename zip decode.io.out).map { case (waittable2rename, decodeOut) =>
    RegEnable(waittable2rename, decodeOut.fire)
  }
  rename.io.ssit := memCtrl.io.ssit2Rename
  rename.io.intReadPorts := VecInit(rat.io.intReadPorts.map(x => VecInit(x.map(_.data))))
  rename.io.fpReadPorts := VecInit(rat.io.fpReadPorts.map(x => VecInit(x.map(_.data))))
  rename.io.vecReadPorts := VecInit(rat.io.vecReadPorts.map(x => VecInit(x.map(_.data))))
  rename.io.int_need_free := rat.io.int_need_free
  rename.io.int_old_pdest := rat.io.int_old_pdest
  rename.io.fp_old_pdest := rat.io.fp_old_pdest
  rename.io.vec_old_pdest := rat.io.vec_old_pdest
  rename.io.debug_int_rat.foreach(_ := rat.io.debug_int_rat.get)
  rename.io.debug_fp_rat.foreach(_ := rat.io.debug_fp_rat.get)
  rename.io.debug_vec_rat.foreach(_ := rat.io.debug_vec_rat.get)
  rename.io.debug_vconfig_rat.foreach(_ := rat.io.debug_vconfig_rat.get)
  rename.io.stallReason.in <> decode.io.stallReason.out
  rename.io.snpt.snptEnq := DontCare
  rename.io.snpt.snptDeq := snpt.io.deq
  rename.io.snpt.useSnpt := useSnpt
  rename.io.snpt.snptSelect := snptSelect
  rename.io.robIsEmpty := rob.io.enq.isEmpty
  rename.io.snpt.flushVec := flushVecNext
  rename.io.snptLastEnq.valid := !isEmpty(snpt.io.enqPtr, snpt.io.deqPtr)
  rename.io.snptLastEnq.bits := snpt.io.snapshots((snpt.io.enqPtr - 1.U).value).robIdx.head

  val renameOut = Wire(chiselTypeOf(rename.io.out))
  renameOut <> rename.io.out
  dispatch.io.fromRename <> renameOut
  renameOut.zip(dispatch.io.recv).foreach { case (out, recv) => out.ready := recv }
  dispatch.io.fromRenameIsFp := rename.io.toDispatchIsFp
  dispatch.io.fromRenameIsInt := rename.io.toDispatchIsInt
  dispatch.io.hartId := io.fromTop.hartId
  dispatch.io.redirect := s1_s3_redirect
  dispatch.io.enqRob <> rob.io.enq
  dispatch.io.robHead := rob.io.debugRobHead
  dispatch.io.stallReason <> rename.io.stallReason.out
  dispatch.io.lqCanAccept := io.lqCanAccept
  dispatch.io.sqCanAccept := io.sqCanAccept
  dispatch.io.robHeadNotReady := rob.io.headNotReady
  dispatch.io.robFull := rob.io.robFull
  dispatch.io.singleStep := RegNext(io.csrCtrl.singlestep)
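
  // The dispatch queues below are flushed by s2_s4_redirect rather than
  // s1_s3_redirect: as noted above, redirects are registered once more on
  // their way to the issue/execute blocks, so the queues must be killed on
  // the same delayed cycle to stay consistent with their consumers.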

  intDq.io.enq <> dispatch.io.toIntDq
  intDq.io.redirect <> s2_s4_redirect

  fpDq.io.enq <> dispatch.io.toFpDq
  fpDq.io.redirect <> s2_s4_redirect

  lsDq.io.enq <> dispatch.io.toLsDq
  lsDq.io.redirect <> s2_s4_redirect

  io.toIssueBlock.intUops <> intDq.io.deq
  io.toIssueBlock.vfUops <> fpDq.io.deq
  io.toIssueBlock.memUops <> lsDq.io.deq
  io.toIssueBlock.allocPregs <> dispatch.io.allocPregs
  io.toIssueBlock.flush <> s2_s4_redirect

  pcMem.io.wen.head := RegNext(io.frontend.fromFtq.pc_mem_wen)
  pcMem.io.waddr.head := RegEnable(io.frontend.fromFtq.pc_mem_waddr, io.frontend.fromFtq.pc_mem_wen)
  pcMem.io.wdata.head := RegEnable(io.frontend.fromFtq.pc_mem_wdata, io.frontend.fromFtq.pc_mem_wen)

  private val jumpPcVec: Vec[UInt] = Wire(Vec(params.numPcReadPort, UInt(VAddrData().dataWidth.W)))
  io.toIssueBlock.pcVec := jumpPcVec

  io.toDataPath.flush := s2_s4_redirect
  io.toExuBlock.flush := s2_s4_redirect

  for ((pcMemIdx, i) <- pcMemRdIndexes("exu").zipWithIndex) {
    pcMem.io.raddr(pcMemIdx) := intDq.io.deqNext(i).ftqPtr.value
    jumpPcVec(i) := pcMem.io.rdata(pcMemIdx).getPc(RegNext(intDq.io.deqNext(i).ftqOffset))
  }
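
  // PC distribution to the schedulers (informal): jumpPcVec holds one PC per
  // "exu" pcMem read port. Each scheduler that reads PCs receives a contiguous
  // slice of it; the drop/take arithmetic below skips the ports consumed by
  // earlier schedulers and hands this one its next numPcReadPort entries.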

  val dqOuts = Seq(io.toIssueBlock.intUops, io.toIssueBlock.vfUops, io.toIssueBlock.memUops)
  dqOuts.zipWithIndex.foreach { case (dqOut, dqIdx) =>
    dqOut.map(_.bits.pc).zipWithIndex.foreach { case (pc, portIdx) =>
      if (params.allSchdParams(dqIdx).numPcReadPort > 0) {
        val realJumpPcVec = jumpPcVec.drop(params.allSchdParams.take(dqIdx).map(_.numPcReadPort).sum).take(params.allSchdParams(dqIdx).numPcReadPort)
        pc := realJumpPcVec(portIdx)
      }
    }
  }

  rob.io.hartId := io.fromTop.hartId
  rob.io.redirect := s1_s3_redirect
  rob.io.writeback := delayedNotFlushedWriteBack

  io.redirect := s1_s3_redirect

  // rob to int block
  io.robio.csr <> rob.io.csr
  // When WFI is disabled, it does not block ROB commit.
  rob.io.csr.wfiEvent := io.robio.csr.wfiEvent
  rob.io.wfi_enable := decode.io.csrCtrl.wfi_enable

  io.toTop.cpuHalt := DelayN(rob.io.cpu_halt, 5)

  io.robio.csr.perfinfo.retiredInstr <> RegNext(rob.io.csr.perfinfo.retiredInstr)
  io.robio.exception := rob.io.exception
  io.robio.exception.bits.pc := s1_robFlushPc

  // rob to mem block
  io.robio.lsq <> rob.io.lsq

  io.debug_int_rat    .foreach(_ := rat.io.diff_int_rat.get)
  io.debug_fp_rat     .foreach(_ := rat.io.diff_fp_rat.get)
  io.debug_vec_rat    .foreach(_ := rat.io.diff_vec_rat.get)
  io.debug_vconfig_rat.foreach(_ := rat.io.diff_vconfig_rat.get)

  rob.io.debug_ls := io.robio.debug_ls
  rob.io.debugHeadLsIssue := io.robio.robHeadLsIssue
  rob.io.lsTopdownInfo := io.robio.lsTopdownInfo
  rob.io.debugEnqLsq := io.debugEnqLsq

  io.robio.robDeqPtr := rob.io.robDeqPtr

  io.debugTopDown.fromRob := rob.io.debugTopDown.toCore
  dispatch.io.debugTopDown.fromRob := rob.io.debugTopDown.toDispatch
  dispatch.io.debugTopDown.fromCore := io.debugTopDown.fromCore
  io.debugRolling := rob.io.debugRolling

  io.perfInfo.ctrlInfo.robFull := RegNext(rob.io.robFull)
  io.perfInfo.ctrlInfo.intdqFull := RegNext(intDq.io.dqFull)
  io.perfInfo.ctrlInfo.fpdqFull := RegNext(fpDq.io.dqFull)
  io.perfInfo.ctrlInfo.lsdqFull := RegNext(lsDq.io.dqFull)

  val pfevent = Module(new PFEvent)
  pfevent.io.distribute_csr := RegNext(io.csrCtrl.distribute_csr)
  val csrevents = pfevent.io.hpmevent.slice(8, 16)

  val perfinfo = IO(new Bundle() {
    val perfEventsRs = Input(Vec(params.IqCnt, new PerfEvent))
    val perfEventsEu0 = Input(Vec(6, new PerfEvent))
    val perfEventsEu1 = Input(Vec(6, new PerfEvent))
  })

  val perfFromUnits = Seq(decode, rename, dispatch, intDq, fpDq, lsDq, rob).flatMap(_.getPerfEvents)
  val perfFromIO = perfinfo.perfEventsEu0.map(x => ("perfEventsEu0", x.value)) ++
    perfinfo.perfEventsEu1.map(x => ("perfEventsEu1", x.value)) ++
    perfinfo.perfEventsRs.map(x => ("perfEventsRs", x.value))
  val perfBlock = Seq()
  // let index = 0 be no event
  val allPerfEvents = Seq(("noEvent", 0.U)) ++ perfFromUnits ++ perfFromIO ++ perfBlock

  if (printEventCoding) {
    for (((name, inc), i) <- allPerfEvents.zipWithIndex) {
      println("CtrlBlock perfEvents Set", name, inc, i)
    }
  }

  val allPerfInc = allPerfEvents.map(_._2.asTypeOf(new PerfEvent))
  val perfEvents = HPerfMonitor(csrevents, allPerfInc).getPerfEvents
  generatePerfEvent()
}
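
// IO bundle of CtrlBlock, grouped by the neighbouring block each interface
// talks to: top, frontend (FTQ), issue/data-path/exu blocks, memory block,
// CSR control, plus assorted debug and top-down ports.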
class CtrlBlockIO()(implicit p: Parameters, params: BackendParams) extends XSBundle {
  val fromTop = new Bundle {
    val hartId = Input(UInt(8.W))
  }
  val toTop = new Bundle {
    val cpuHalt = Output(Bool())
  }
  val frontend = Flipped(new FrontendToCtrlIO())
  val toIssueBlock = new Bundle {
    val flush = ValidIO(new Redirect)
    val allocPregs = Vec(RenameWidth, Output(new ResetPregStateReq))
    val intUops = Vec(dpParams.IntDqDeqWidth, DecoupledIO(new DynInst))
    val vfUops = Vec(dpParams.FpDqDeqWidth, DecoupledIO(new DynInst))
    val memUops = Vec(dpParams.LsDqDeqWidth, DecoupledIO(new DynInst))
    val pcVec = Output(Vec(params.numPcReadPort, UInt(VAddrData().dataWidth.W)))
  }
  val fromDataPath = new Bundle {
    val vtype = Input(new VType)
  }
  val toDataPath = new Bundle {
    val vtypeAddr = Output(UInt(PhyRegIdxWidth.W))
    val flush = ValidIO(new Redirect)
  }
  val toExuBlock = new Bundle {
    val flush = ValidIO(new Redirect)
  }
  val fromWB = new Bundle {
    val wbData = Flipped(MixedVec(params.genWrite2CtrlBundles))
  }
  val redirect = ValidIO(new Redirect)
  val fromMem = new Bundle {
    val stIn = Vec(params.StaExuCnt, Flipped(ValidIO(new DynInst))) // use storeSetHit, ssid, robIdx
    val violation = Flipped(ValidIO(new Redirect))
  }
  val memLdPcRead = Vec(params.LduCnt, Flipped(new FtqRead(UInt(VAddrBits.W))))
  val memStPcRead = Vec(params.StaCnt, Flipped(new FtqRead(UInt(VAddrBits.W))))
  val memHyPcRead = Vec(params.HyuCnt, Flipped(new FtqRead(UInt(VAddrBits.W))))

  val csrCtrl = Input(new CustomCSRCtrlIO)
  val robio = new Bundle {
    val csr = new RobCSRIO
    val exception = ValidIO(new ExceptionInfo)
    val lsq = new RobLsqIO
    val lsTopdownInfo = Vec(params.LduCnt + params.HyuCnt, Input(new LsTopdownInfo))
    val debug_ls = Input(new DebugLSIO())
    val robHeadLsIssue = Input(Bool())
    val robDeqPtr = Output(new RobPtr)
  }

  val perfInfo = Output(new Bundle {
    val ctrlInfo = new Bundle {
      val robFull = Bool()
      val intdqFull = Bool()
      val fpdqFull = Bool()
      val lsdqFull = Bool()
    }
  })
  val debug_int_rat     = if (params.debugEn) Some(Vec(32, Output(UInt(PhyRegIdxWidth.W)))) else None
  val debug_fp_rat      = if (params.debugEn) Some(Vec(32, Output(UInt(PhyRegIdxWidth.W)))) else None
  val debug_vec_rat     = if (params.debugEn) Some(Vec(32, Output(UInt(PhyRegIdxWidth.W)))) else None
  val debug_vconfig_rat = if (params.debugEn) Some(Output(UInt(PhyRegIdxWidth.W))) else None // TODO: use me

  val sqCanAccept = Input(Bool())
  val lqCanAccept = Input(Bool())

  val debugTopDown = new Bundle {
    val fromRob = new RobCoreTopDownIO
    val fromCore = new CoreDispatchTopDownIO
  }
  val debugRolling = new RobDebugRollingIO
  val debugEnqLsq = Input(new LsqEnqIO)
}

class NamedIndexes(namedCnt: Seq[(String, Int)]) {
  require(namedCnt.map(_._1).distinct.size == namedCnt.size, "namedCnt should not contain duplicate names")

  val maxIdx = namedCnt.map(_._2).sum
  val nameRangeMap: Map[String, (Int, Int)] = namedCnt.indices.map { i =>
    val begin = namedCnt.slice(0, i).map(_._2).sum
    val end = begin + namedCnt(i)._2
    (namedCnt(i)._1, (begin, end))
  }.toMap

  def apply(name: String): Seq[Int] = {
    require(nameRangeMap.contains(name), s"NamedIndexes does not contain $name")
    nameRangeMap(name)._1 until nameRangeMap(name)._2
  }
}
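
// Usage sketch for the NamedIndexes helper above (illustrative, pure
// elaboration-time Scala; the names and counts are made up):
//   val idx = new NamedIndexes(Seq("a" -> 2, "b" -> 1))
//   idx.maxIdx  // 3
//   idx("a")    // indexes 0 and 1
//   idx("b")    // index 2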