/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.backend.rob

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import difftest._
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
import utils._
import xiangshan._
import xiangshan.backend.exu.ExuConfig
import xiangshan.frontend.FtqPtr

class RobPtr(implicit p: Parameters) extends CircularQueuePtr[RobPtr](
  p => p(XSCoreParamsKey).RobSize
) with HasCircularQueuePtrHelper {

  def needFlush(redirect: Valid[Redirect]): Bool = {
    val flushItself = redirect.bits.flushItself() && this === redirect.bits.robIdx
    redirect.valid && (flushItself || isAfter(this, redirect.bits.robIdx))
  }

  def needFlush(redirect: Seq[Valid[Redirect]]): Bool = VecInit(redirect.map(needFlush)).asUInt.orR
}

object RobPtr {
  def apply(f: Bool, v: UInt)(implicit p: Parameters): RobPtr = {
    val ptr = Wire(new RobPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}

class RobCSRIO(implicit p: Parameters) extends XSBundle {
  val intrBitSet = Input(Bool())
  val trapTarget = Input(UInt(VAddrBits.W))
  val isXRet = Input(Bool())
  val wfiEvent = Input(Bool())

  val fflags = Output(Valid(UInt(5.W)))
  val dirty_fs = Output(Bool())
  val perfinfo = new Bundle {
    val retiredInstr = Output(UInt(3.W))
  }
}

class RobLsqIO(implicit p: Parameters) extends XSBundle {
  val lcommit = Output(UInt(log2Up(CommitWidth + 1).W))
  val scommit = Output(UInt(log2Up(CommitWidth + 1).W))
  val pendingld = Output(Bool())
  val pendingst = Output(Bool())
  val commit = Output(Bool())
}

class RobEnqIO(implicit p: Parameters) extends XSBundle {
  val canAccept = Output(Bool())
  val isEmpty = Output(Bool())
  // valid vector, for robIdx gen and walk
  val needAlloc = Vec(RenameWidth, Input(Bool()))
  val req = Vec(RenameWidth, Flipped(ValidIO(new MicroOp)))
  val resp = Vec(RenameWidth, Output(new RobPtr))
}

class RobDispatchData(implicit p: Parameters) extends RobCommitInfo

class RobDeqPtrWrapper(implicit p: Parameters) extends XSModule with HasCircularQueuePtrHelper {
  val io = IO(new Bundle {
    // for commits/flush
    val state = Input(UInt(2.W))
    val deq_v = Vec(CommitWidth, Input(Bool()))
    val deq_w = Vec(CommitWidth, Input(Bool()))
    val exception_state = Flipped(ValidIO(new RobExceptionInfo))
    // for flush: when exception occurs, reset deqPtrs to range(0, CommitWidth)
    val intrBitSetReg = Input(Bool())
    val hasNoSpecExec = Input(Bool())
    val interrupt_safe = Input(Bool())
    val blockCommit = Input(Bool())
    // output: the CommitWidth deqPtr
    val out = Vec(CommitWidth, Output(new RobPtr))
    val next_out = Vec(CommitWidth, Output(new RobPtr))
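    // `out` drives the registered dequeue pointers used in the current cycle, while
    // `next_out` exposes the combinational next-cycle values so that the data modules
    // can set up their read addresses one cycle in advance.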
  })

  val deqPtrVec = RegInit(VecInit((0 until CommitWidth).map(_.U.asTypeOf(new RobPtr))))

  // for exceptions (flushPipe included) and interrupts:
  // only consider the first instruction
  val intrEnable = io.intrBitSetReg && !io.hasNoSpecExec && io.interrupt_safe
  val exceptionEnable = io.deq_w(0) && io.exception_state.valid && io.exception_state.bits.not_commit && io.exception_state.bits.robIdx === deqPtrVec(0)
  val redirectOutValid = io.state === 0.U && io.deq_v(0) && (intrEnable || exceptionEnable)

  // for normal commits: only considered when there are no exceptions
  // we don't need to consider whether the first instruction has exceptions, since it will trigger an exception.
  val commit_exception = io.exception_state.valid && !isAfter(io.exception_state.bits.robIdx, deqPtrVec.last)
  val canCommit = VecInit((0 until CommitWidth).map(i => io.deq_v(i) && io.deq_w(i)))
  val normalCommitCnt = PriorityEncoder(canCommit.map(c => !c) :+ true.B)
  // when io.intrBitSetReg is set or there are possible exceptions among these instructions,
  // only one instruction is allowed to commit
  val allowOnlyOne = commit_exception || io.intrBitSetReg
  val commitCnt = Mux(allowOnlyOne, canCommit(0), normalCommitCnt)

  val commitDeqPtrVec = VecInit(deqPtrVec.map(_ + commitCnt))
  val deqPtrVec_next = Mux(io.state === 0.U && !redirectOutValid && !io.blockCommit, commitDeqPtrVec, deqPtrVec)

  deqPtrVec := deqPtrVec_next

  io.next_out := deqPtrVec_next
  io.out := deqPtrVec

  when (io.state === 0.U) {
    XSInfo(io.state === 0.U && commitCnt > 0.U, "retired %d insts\n", commitCnt)
  }

}

class RobEnqPtrWrapper(implicit p: Parameters) extends XSModule with HasCircularQueuePtrHelper {
  val io = IO(new Bundle {
    // for input redirect
    val redirect = Input(Valid(new Redirect))
    // for enqueue
    val allowEnqueue = Input(Bool())
    val hasBlockBackward = Input(Bool())
    val enq = Vec(RenameWidth, Input(Bool()))
    val out = Output(Vec(RenameWidth, new RobPtr))
  })

  val enqPtrVec = RegInit(VecInit.tabulate(RenameWidth)(_.U.asTypeOf(new RobPtr)))

  // enqueue
  val canAccept = io.allowEnqueue && !io.hasBlockBackward
  val dispatchNum = Mux(canAccept, PopCount(io.enq), 0.U)

  for ((ptr, i) <- enqPtrVec.zipWithIndex) {
    when (io.redirect.valid) {
      ptr := Mux(io.redirect.bits.flushItself(), io.redirect.bits.robIdx + i.U, io.redirect.bits.robIdx + (i + 1).U)
    }.otherwise {
      ptr := ptr + dispatchNum
    }
  }

  io.out := enqPtrVec

}

class RobExceptionInfo(implicit p: Parameters) extends XSBundle {
  // val valid = Bool()
  val robIdx = new RobPtr
  val exceptionVec = ExceptionVec()
  val flushPipe = Bool()
  val replayInst = Bool() // redirect to that inst itself
  val singleStep = Bool() // TODO add frontend hit beneath
  val crossPageIPFFix = Bool()
  val trigger = new TriggerCf

//  def trigger_before = !trigger.getTimingBackend && trigger.getHitBackend
//  def trigger_after = trigger.getTimingBackend && trigger.getHitBackend
  def has_exception = exceptionVec.asUInt.orR || flushPipe || singleStep || replayInst || trigger.hit
  def not_commit = exceptionVec.asUInt.orR || singleStep || replayInst || trigger.hit
  // only these exception types may mark the entry as written back at enqueue time
  def can_writeback = exceptionVec.asUInt.orR || singleStep || trigger.hit
}

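/**
  * ExceptionGen tracks the oldest pending exception/flush request in the ROB.
  *
  * Selection is pipelined: s0 picks the oldest entry within each writeback group
  * (CSR, load, store), s1 selects the oldest of the three group winners, and s2
  * merges the s1 result (or a newly enqueued exception) into the `current`
  * register, always keeping the entry with the smallest robIdx.
  */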
class ExceptionGen(implicit p: Parameters) extends XSModule with HasCircularQueuePtrHelper {
  val io = IO(new Bundle {
    val redirect = Input(Valid(new Redirect))
    val flush = Input(Bool())
    val enq = Vec(RenameWidth, Flipped(ValidIO(new RobExceptionInfo)))
    val wb = Vec(1 + LoadPipelineWidth + StorePipelineWidth, Flipped(ValidIO(new RobExceptionInfo)))
    val out = ValidIO(new RobExceptionInfo)
    val state = ValidIO(new RobExceptionInfo)
  })

  def getOldest(valid: Seq[Bool], bits: Seq[RobExceptionInfo]): (Seq[Bool], Seq[RobExceptionInfo]) = {
    assert(valid.length == bits.length)
    assert(isPow2(valid.length))
    if (valid.length == 1) {
      (valid, bits)
    } else if (valid.length == 2) {
      val res = Seq.fill(2)(Wire(ValidIO(chiselTypeOf(bits(0)))))
      for (i <- res.indices) {
        res(i).valid := valid(i)
        res(i).bits := bits(i)
      }
      val oldest = Mux(!valid(1) || valid(0) && isAfter(bits(1).robIdx, bits(0).robIdx), res(0), res(1))
      (Seq(oldest.valid), Seq(oldest.bits))
    } else {
      val left = getOldest(valid.take(valid.length / 2), bits.take(valid.length / 2))
      val right = getOldest(valid.takeRight(valid.length / 2), bits.takeRight(valid.length / 2))
      getOldest(left._1 ++ right._1, left._2 ++ right._2)
    }
  }

  val current = Reg(Valid(new RobExceptionInfo))

  // orR the exceptionVec
  val lastCycleFlush = RegNext(io.flush)
  val in_enq_valid = VecInit(io.enq.map(e => e.valid && e.bits.has_exception && !lastCycleFlush))
  val in_wb_valid = io.wb.map(w => w.valid && w.bits.has_exception && !lastCycleFlush)

  // s0: compare wb(1) ~ wb(LoadPipelineWidth) and wb(1 + LoadPipelineWidth) ~ wb(LoadPipelineWidth + StorePipelineWidth)
  val wb_valid = in_wb_valid.zip(io.wb.map(_.bits)).map{ case (v, bits) => v && !(bits.robIdx.needFlush(io.redirect) || io.flush) }
  val csr_wb_bits = io.wb(0).bits
  val load_wb_bits = getOldest(in_wb_valid.slice(1, 1 + LoadPipelineWidth), io.wb.map(_.bits).slice(1, 1 + LoadPipelineWidth))._2(0)
  val store_wb_bits = getOldest(in_wb_valid.slice(1 + LoadPipelineWidth, 1 + LoadPipelineWidth + StorePipelineWidth), io.wb.map(_.bits).slice(1 + LoadPipelineWidth, 1 + LoadPipelineWidth + StorePipelineWidth))._2(0)
  val s0_out_valid = RegNext(VecInit(Seq(wb_valid(0), wb_valid.slice(1, 1 + LoadPipelineWidth).reduce(_ || _), wb_valid.slice(1 + LoadPipelineWidth, 1 + LoadPipelineWidth + StorePipelineWidth).reduce(_ || _))))
  val s0_out_bits = RegNext(VecInit(Seq(csr_wb_bits, load_wb_bits, store_wb_bits)))

  // s1: select the oldest of the three s0 results, dropping any that are being flushed
  val s1_valid = VecInit(s0_out_valid.zip(s0_out_bits).map{ case (v, b) => v && !(b.robIdx.needFlush(io.redirect) || io.flush) })
  val compare_01_valid = s0_out_valid(0) || s0_out_valid(1)
  val compare_01_bits = Mux(!s0_out_valid(0) || s0_out_valid(1) && isAfter(s0_out_bits(0).robIdx, s0_out_bits(1).robIdx), s0_out_bits(1), s0_out_bits(0))
  val compare_bits = Mux(!s0_out_valid(2) || compare_01_valid && isAfter(s0_out_bits(2).robIdx, compare_01_bits.robIdx), compare_01_bits, s0_out_bits(2))
  val s1_out_bits = RegNext(compare_bits)
  val s1_out_valid = RegNext(s1_valid.asUInt.orR)

  val enq_valid = RegNext(in_enq_valid.asUInt.orR && !io.redirect.valid && !io.flush)
  val enq_bits = RegNext(ParallelPriorityMux(in_enq_valid, io.enq.map(_.bits)))

  // s2: compare the input exception with the current one
  // priorities:
  // (1) system reset
  // (2) current is valid: flush, remain, merge, update
  // (3) current is not valid: s1 or enq
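  // When the s1 result refers to the same robIdx as the current entry, the two are merged
  // by OR-ing their exception vectors and flags instead of replacing the entry.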
  val current_flush = current.bits.robIdx.needFlush(io.redirect) || io.flush
  val s1_flush = s1_out_bits.robIdx.needFlush(io.redirect) || io.flush
  when (reset.asBool) {
    current.valid := false.B
  }.elsewhen (current.valid) {
    when (current_flush) {
      current.valid := Mux(s1_flush, false.B, s1_out_valid)
    }
    when (s1_out_valid && !s1_flush) {
      when (isAfter(current.bits.robIdx, s1_out_bits.robIdx)) {
        current.bits := s1_out_bits
      }.elsewhen (current.bits.robIdx === s1_out_bits.robIdx) {
        current.bits.exceptionVec := (s1_out_bits.exceptionVec.asUInt | current.bits.exceptionVec.asUInt).asTypeOf(ExceptionVec())
        current.bits.flushPipe := s1_out_bits.flushPipe || current.bits.flushPipe
        current.bits.replayInst := s1_out_bits.replayInst || current.bits.replayInst
        current.bits.singleStep := s1_out_bits.singleStep || current.bits.singleStep
        current.bits.trigger := (s1_out_bits.trigger.asUInt | current.bits.trigger.asUInt).asTypeOf(new TriggerCf)
      }
    }
  }.elsewhen (s1_out_valid && !s1_flush) {
    current.valid := true.B
    current.bits := s1_out_bits
  }.elsewhen (enq_valid && !(io.redirect.valid || io.flush)) {
    current.valid := true.B
    current.bits := enq_bits
  }

  io.out.valid := s1_out_valid || enq_valid && enq_bits.can_writeback
  io.out.bits := Mux(s1_out_valid, s1_out_bits, enq_bits)
  io.state := current

}

class RobFlushInfo(implicit p: Parameters) extends XSBundle {
  val ftqIdx = new FtqPtr
  val robIdx = new RobPtr
  val ftqOffset = UInt(log2Up(PredictWidth).W)
  val replayInst = Bool()
}

class Rob(implicit p: Parameters) extends LazyModule with HasWritebackSink with HasXSParameter {

  lazy val module = new RobImp(this)

  override def generateWritebackIO(
    thisMod: Option[HasWritebackSource] = None,
    thisModImp: Option[HasWritebackSourceImp] = None
  ): Unit = {
    val sources = writebackSinksImp(thisMod, thisModImp)
    module.io.writeback.zip(sources).foreach(x => x._1 := x._2)
  }
}

class RobImp(outer: Rob)(implicit p: Parameters) extends LazyModuleImp(outer)
  with HasXSParameter with HasCircularQueuePtrHelper with HasPerfEvents {
  val wbExuConfigs = outer.writebackSinksParams.map(_.exuConfigs)
  val numWbPorts = wbExuConfigs.map(_.length)

  val io = IO(new Bundle() {
    val hartId = Input(UInt(8.W))
    val redirect = Input(Valid(new Redirect))
    val enq = new RobEnqIO
    val flushOut = ValidIO(new Redirect)
    val exception = ValidIO(new ExceptionInfo)
    // exu + brq
    val writeback = MixedVec(numWbPorts.map(num => Vec(num, Flipped(ValidIO(new ExuOutput)))))
    val commits = new RobCommitIO
    val lsq = new RobLsqIO
    val robDeqPtr = Output(new RobPtr)
    val csr = new RobCSRIO
    val robFull = Output(Bool())
    val cpu_halt = Output(Bool())
  })

  def selectWb(index: Int, func: Seq[ExuConfig] => Boolean): Seq[(Seq[ExuConfig], ValidIO[ExuOutput])] = {
    wbExuConfigs(index).zip(io.writeback(index)).filter(x => func(x._1))
  }
  val exeWbSel = outer.selWritebackSinks(_.exuConfigs.length)
  val fflagsWbSel = outer.selWritebackSinks(_.exuConfigs.count(_.exists(_.writeFflags)))
  val fflagsPorts = selectWb(fflagsWbSel, _.exists(_.writeFflags))
  val exceptionWbSel = outer.selWritebackSinks(_.exuConfigs.count(_.exists(_.needExceptionGen)))
  val exceptionPorts = selectWb(exceptionWbSel, _.exists(_.needExceptionGen))
  val exuWbPorts = selectWb(exeWbSel, _.forall(_ != StdExeUnitCfg))
  val stdWbPorts = selectWb(exeWbSel, _.contains(StdExeUnitCfg))
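  // Store address (sta) results arrive through the regular exu ports, while store data
  // (std) results arrive through the dedicated std ports; a store only counts as fully
  // written back once both have been seen (see store_data_writebacked below).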
  println(s"Rob: size $RobSize, numWbPorts: $numWbPorts, commitwidth: $CommitWidth")
  println(s"exuPorts: ${exuWbPorts.map(_._1.map(_.name))}")
  println(s"stdPorts: ${stdWbPorts.map(_._1.map(_.name))}")
  println(s"fflags: ${fflagsPorts.map(_._1.map(_.name))}")


  val exuWriteback = exuWbPorts.map(_._2)
  val stdWriteback = stdWbPorts.map(_._2)

  // instvalid field
  val valid = Mem(RobSize, Bool())
  // writeback status
  val writebacked = Mem(RobSize, Bool())
  val store_data_writebacked = Mem(RobSize, Bool())
  // data for redirect, exception, etc.
  val flagBkup = Mem(RobSize, Bool())
  // some instructions are not allowed to trigger interrupts
  // They have side effects on the states of the processor before they write back
  val interrupt_safe = Mem(RobSize, Bool())

  // data for debug
  // Warn: debug_* prefix should not exist in generated verilog.
  val debug_microOp = Mem(RobSize, new MicroOp)
  val debug_exuData = Reg(Vec(RobSize, UInt(XLEN.W))) // for debug
  val debug_exuDebug = Reg(Vec(RobSize, new DebugBundle)) // for debug

  // pointers
  // For enqueue ptr, we don't duplicate it since only enqueue needs it.
  val enqPtrVec = Wire(Vec(RenameWidth, new RobPtr))
  val deqPtrVec = Wire(Vec(CommitWidth, new RobPtr))

  val walkPtrVec = Reg(Vec(CommitWidth, new RobPtr))
  val validCounter = RegInit(0.U(log2Ceil(RobSize + 1).W))
  val allowEnqueue = RegInit(true.B)

  val enqPtr = enqPtrVec.head
  val deqPtr = deqPtrVec(0)
  val walkPtr = walkPtrVec(0)

  val isEmpty = enqPtr === deqPtr
  val isReplaying = io.redirect.valid && RedirectLevel.flushItself(io.redirect.bits.level)

  /**
    * states of Rob
    */
  val s_idle :: s_walk :: s_extrawalk :: Nil = Enum(3)
  val state = RegInit(s_idle)

  /**
    * Data Modules
    *
    * CommitDataModule: data from dispatch
    * (1) read: commits/walk/exception
    * (2) write: enqueue
    *
    * WritebackData: data from writeback
    * (1) read: commits/walk/exception
    * (2) write: write back from exe units
    */
  val dispatchData = Module(new SyncDataModuleTemplate(new RobDispatchData, RobSize, CommitWidth, RenameWidth))
  val dispatchDataRead = dispatchData.io.rdata

  val exceptionGen = Module(new ExceptionGen)
  val exceptionDataRead = exceptionGen.io.state
  val fflagsDataRead = Wire(Vec(CommitWidth, UInt(5.W)))

  io.robDeqPtr := deqPtr

  /**
    * Enqueue (from dispatch)
    */
  // special cases
  val hasBlockBackward = RegInit(false.B)
  val hasNoSpecExec = RegInit(false.B)
  val doingSvinval = RegInit(false.B)
  // When a blockBackward instruction leaves the Rob (commit or walk), hasBlockBackward should be set to false.B
  // To reduce register usage, for hasBlockBackward cases, we allow enqueue after the ROB is empty.
  when (isEmpty) { hasBlockBackward := false.B }
  // When any instruction commits, hasNoSpecExec should be set to false.B
  when ((io.commits.hasWalkInstr && state =/= s_extrawalk) || io.commits.hasCommitInstr) { hasNoSpecExec := false.B }

  // The wait-for-interrupt (WFI) instruction waits in the ROB until an interrupt might need servicing.
  // io.csr.wfiEvent is asserted when the WFI may resume execution, and hasWFI is then cleared.
  // It does not affect how interrupts are serviced. Note that WFI is noSpecExec and it does not trigger interrupts.
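  // hasWFI is set when a WFI instruction enqueues without an exception or trigger hit,
  // blocks further commits and asserts io.cpu_halt while set, and is cleared two cycles
  // after io.csr.wfiEvent fires.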
  val hasWFI = RegInit(false.B)
  io.cpu_halt := hasWFI
  when (RegNext(RegNext(io.csr.wfiEvent))) {
    hasWFI := false.B
  }

  val allocatePtrVec = VecInit((0 until RenameWidth).map(i => enqPtrVec(PopCount(io.enq.needAlloc.take(i)))))
  io.enq.canAccept := allowEnqueue && !hasBlockBackward
  io.enq.resp := allocatePtrVec
  val canEnqueue = VecInit(io.enq.req.map(_.valid && io.enq.canAccept))
  val timer = GTimer()
  for (i <- 0 until RenameWidth) {
    // we don't check whether io.redirect is valid here since redirect has higher priority
    when (canEnqueue(i)) {
      val enqUop = io.enq.req(i).bits
      val enqIndex = allocatePtrVec(i).value
      // store uop in data module and debug_microOp Vec
      debug_microOp(enqIndex) := enqUop
      debug_microOp(enqIndex).debugInfo.dispatchTime := timer
      debug_microOp(enqIndex).debugInfo.enqRsTime := timer
      debug_microOp(enqIndex).debugInfo.selectTime := timer
      debug_microOp(enqIndex).debugInfo.issueTime := timer
      debug_microOp(enqIndex).debugInfo.writebackTime := timer
      when (enqUop.ctrl.blockBackward) {
        hasBlockBackward := true.B
      }
      when (enqUop.ctrl.noSpecExec) {
        hasNoSpecExec := true.B
      }
      val enqHasTriggerHit = io.enq.req(i).bits.cf.trigger.getHitFrontend
      val enqHasException = ExceptionNO.selectFrontend(enqUop.cf.exceptionVec).asUInt.orR
      // the first instruction of an Svinval sequence enqueues, so set doingSvinval to mark that we are inside the sequence
      when (!enqHasTriggerHit && !enqHasException && FuType.isSvinvalBegin(enqUop.ctrl.fuType, enqUop.ctrl.fuOpType, enqUop.ctrl.flushPipe)) {
        doingSvinval := true.B
      }
      // the last instruction of an Svinval sequence enqueues, so clear doingSvinval
      when (!enqHasTriggerHit && !enqHasException && FuType.isSvinvalEnd(enqUop.ctrl.fuType, enqUop.ctrl.fuOpType, enqUop.ctrl.flushPipe)) {
        doingSvinval := false.B
      }
      // while inside the Svinval software code region, only Svinval.vma and the Svinval end instruction may appear
      assert(!doingSvinval || (FuType.isSvinval(enqUop.ctrl.fuType, enqUop.ctrl.fuOpType, enqUop.ctrl.flushPipe) ||
        FuType.isSvinvalEnd(enqUop.ctrl.fuType, enqUop.ctrl.fuOpType, enqUop.ctrl.flushPipe)))
      when (enqUop.ctrl.isWFI && !enqHasException && !enqHasTriggerHit) {
        hasWFI := true.B
      }
    }
  }
  val dispatchNum = Mux(io.enq.canAccept, PopCount(Cat(io.enq.req.map(_.valid))), 0.U)
  io.enq.isEmpty := RegNext(isEmpty && dispatchNum === 0.U)

  // debug info for enqueue (dispatch)
  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n")
  XSInfo(dispatchNum =/= 0.U, p"dispatched $dispatchNum insts\n")


  /**
    * Writeback (from execution units)
    */
  for (wb <- exuWriteback) {
    when (wb.valid) {
      val wbIdx = wb.bits.uop.robIdx.value
      debug_exuData(wbIdx) := wb.bits.data
      debug_exuDebug(wbIdx) := wb.bits.debug
      debug_microOp(wbIdx).debugInfo.enqRsTime := wb.bits.uop.debugInfo.enqRsTime
      debug_microOp(wbIdx).debugInfo.selectTime := wb.bits.uop.debugInfo.selectTime
      debug_microOp(wbIdx).debugInfo.issueTime := wb.bits.uop.debugInfo.issueTime
      debug_microOp(wbIdx).debugInfo.writebackTime := wb.bits.uop.debugInfo.writebackTime

      val debug_Uop = debug_microOp(wbIdx)
      XSInfo(true.B,
        p"writebacked pc 0x${Hexadecimal(debug_Uop.cf.pc)} wen ${debug_Uop.ctrl.rfWen} " +
        p"data 0x${Hexadecimal(wb.bits.data)} ldst ${debug_Uop.ctrl.ldest} pdst ${debug_Uop.pdest} " +
        p"skip ${wb.bits.debug.isMMIO} robIdx: ${wb.bits.uop.robIdx}\n"
      )
    }
  }
  val writebackNum = PopCount(exuWriteback.map(_.valid))
  XSInfo(writebackNum =/= 0.U, "writebacked %d insts\n", writebackNum)


  /**
    * RedirectOut: Interrupt and Exceptions
    */
  val deqDispatchData = dispatchDataRead(0)
  val debug_deqUop = debug_microOp(deqPtr.value)

  val intrBitSetReg = RegNext(io.csr.intrBitSet)
  val intrEnable = intrBitSetReg && !hasNoSpecExec && interrupt_safe(deqPtr.value)
  val deqHasExceptionOrFlush = exceptionDataRead.valid && exceptionDataRead.bits.robIdx === deqPtr
  val deqHasException = deqHasExceptionOrFlush && (exceptionDataRead.bits.exceptionVec.asUInt.orR ||
    exceptionDataRead.bits.singleStep || exceptionDataRead.bits.trigger.hit)
  val deqHasFlushPipe = deqHasExceptionOrFlush && exceptionDataRead.bits.flushPipe
  val deqHasReplayInst = deqHasExceptionOrFlush && exceptionDataRead.bits.replayInst
  val exceptionEnable = writebacked(deqPtr.value) && deqHasException

  XSDebug(deqHasException && exceptionDataRead.bits.singleStep, "Debug Mode: Deq has singlestep exception\n")
  XSDebug(deqHasException && exceptionDataRead.bits.trigger.getHitFrontend, "Debug Mode: Deq has frontend trigger exception\n")
  XSDebug(deqHasException && exceptionDataRead.bits.trigger.getHitBackend, "Debug Mode: Deq has backend trigger exception\n")

  val isFlushPipe = writebacked(deqPtr.value) && (deqHasFlushPipe || deqHasReplayInst)

  // io.flushOut will trigger a redirect at the next cycle.
  // Block any redirect or commit at the next cycle.
  val lastCycleFlush = RegNext(io.flushOut.valid)

  io.flushOut.valid := (state === s_idle) && valid(deqPtr.value) && (intrEnable || exceptionEnable || isFlushPipe) && !lastCycleFlush
  io.flushOut.bits := DontCare
  io.flushOut.bits.robIdx := deqPtr
  io.flushOut.bits.ftqIdx := deqDispatchData.ftqIdx
  io.flushOut.bits.ftqOffset := deqDispatchData.ftqOffset
  io.flushOut.bits.level := Mux(deqHasReplayInst || intrEnable || exceptionEnable, RedirectLevel.flush, RedirectLevel.flushAfter) // TODO use this to implement "exception next"
  io.flushOut.bits.interrupt := true.B
  XSPerfAccumulate("interrupt_num", io.flushOut.valid && intrEnable)
  XSPerfAccumulate("exception_num", io.flushOut.valid && exceptionEnable)
  XSPerfAccumulate("flush_pipe_num", io.flushOut.valid && isFlushPipe)
  XSPerfAccumulate("replay_inst_num", io.flushOut.valid && isFlushPipe && deqHasReplayInst)

  val exceptionHappen = (state === s_idle) && valid(deqPtr.value) && (intrEnable || exceptionEnable) && !lastCycleFlush
  io.exception.valid := RegNext(exceptionHappen)
  io.exception.bits.uop := RegEnable(debug_deqUop, exceptionHappen)
  io.exception.bits.uop.ctrl.commitType := RegEnable(deqDispatchData.commitType, exceptionHappen)
  io.exception.bits.uop.cf.exceptionVec := RegEnable(exceptionDataRead.bits.exceptionVec, exceptionHappen)
  io.exception.bits.uop.ctrl.singleStep := RegEnable(exceptionDataRead.bits.singleStep, exceptionHappen)
  io.exception.bits.uop.cf.crossPageIPFFix := RegEnable(exceptionDataRead.bits.crossPageIPFFix, exceptionHappen)
  io.exception.bits.isInterrupt := RegEnable(intrEnable, exceptionHappen)
  io.exception.bits.uop.cf.trigger := RegEnable(exceptionDataRead.bits.trigger, exceptionHappen)

  XSDebug(io.flushOut.valid,
    p"generate redirect: pc 0x${Hexadecimal(io.exception.bits.uop.cf.pc)} intr $intrEnable " +
    p"excp $exceptionEnable flushPipe $isFlushPipe " +
    p"Trap_target 0x${Hexadecimal(io.csr.trapTarget)} exceptionVec ${Binary(exceptionDataRead.bits.exceptionVec.asUInt)}\n")
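  // Note that io.flushOut is driven combinationally in the cycle the faulting instruction
  // reaches the ROB head, whereas io.exception is registered and is observed by the CSR
  // one cycle later.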


  /**
    * Commits (and walk)
    * They share the same width.
    */
  val walkCounter = Reg(UInt(log2Up(RobSize + 1).W))
  val shouldWalkVec = VecInit((0 until CommitWidth).map(_.U < walkCounter))
  val walkFinished = walkCounter <= CommitWidth.U

  // extra space is used when the rob does not have enough space, but misprediction recovery needs this info to walk the rename map table
  require(RenameWidth <= CommitWidth)
  val extraSpaceForMPR = Reg(Vec(RenameWidth, new RobDispatchData))
  val usedSpaceForMPR = Reg(Vec(RenameWidth, Bool()))
  when (io.enq.needAlloc.asUInt.orR && io.redirect.valid) {
    usedSpaceForMPR := io.enq.needAlloc
    extraSpaceForMPR := dispatchData.io.wdata
    XSDebug("rob full, switched to s_extrawalk. needExtraSpaceForMPR: %b\n", io.enq.needAlloc.asUInt)
  }

  // wiring to csr
  val (wflags, fpWen) = (0 until CommitWidth).map(i => {
    val v = io.commits.commitValid(i)
    val info = io.commits.info(i)
    (v & info.wflags, v & info.fpWen)
  }).unzip
  val fflags = Wire(Valid(UInt(5.W)))
  fflags.valid := io.commits.isCommit && VecInit(wflags).asUInt.orR
  fflags.bits := wflags.zip(fflagsDataRead).map({
    case (w, f) => Mux(w, f, 0.U)
  }).reduce(_|_)
  val dirty_fs = io.commits.isCommit && VecInit(fpWen).asUInt.orR

  // when mispredicted branches write back, stop commit in the next 2 cycles
  // TODO: don't check all exu write back
  val misPredWb = Cat(VecInit(exuWriteback.map(wb =>
    wb.bits.redirect.cfiUpdate.isMisPred && wb.bits.redirectValid
  ))).orR
  val misPredBlockCounter = Reg(UInt(3.W))
  misPredBlockCounter := Mux(misPredWb,
    "b111".U,
    misPredBlockCounter >> 1.U
  )
  val misPredBlock = misPredBlockCounter(0)
  val blockCommit = misPredBlock || isReplaying || lastCycleFlush || hasWFI

  io.commits.isWalk := state =/= s_idle
  io.commits.isCommit := state === s_idle && !blockCommit
  val walk_v = VecInit(walkPtrVec.map(ptr => valid(ptr.value)))
  val commit_v = VecInit(deqPtrVec.map(ptr => valid(ptr.value)))
  // a store will be committed only if both sta & std have been written back
  val commit_w = VecInit(deqPtrVec.map(ptr => writebacked(ptr.value) && store_data_writebacked(ptr.value)))
  val commit_exception = exceptionDataRead.valid && !isAfter(exceptionDataRead.bits.robIdx, deqPtrVec.last)
  val commit_block = VecInit((0 until CommitWidth).map(i => !commit_w(i)))
  val allowOnlyOneCommit = commit_exception || intrBitSetReg
  // for instructions that may block others, we don't allow them to commit
  for (i <- 0 until CommitWidth) {
    // defaults: state === s_idle and instructions commit
    // when intrBitSetReg, allow only one instruction to commit at each clock cycle
    val isBlocked = if (i != 0) Cat(commit_block.take(i)).orR || allowOnlyOneCommit else intrEnable || deqHasException || deqHasReplayInst
    io.commits.commitValid(i) := commit_v(i) && commit_w(i) && !isBlocked
    io.commits.info(i) := dispatchDataRead(i)

    io.commits.walkValid(i) := shouldWalkVec(i)
    when (io.commits.isWalk && state === s_walk && shouldWalkVec(i)) {
      XSError(!walk_v(i), s"why not $i???\n")
    }
    when (state === s_extrawalk) {
      if (i < RenameWidth) {
        io.commits.walkValid(i) := usedSpaceForMPR(RenameWidth - i - 1)
        io.commits.info(i) := extraSpaceForMPR(RenameWidth - i - 1)
      }
      else {
        io.commits.walkValid(i) := false.B
      }
    }
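    // In s_extrawalk the walk information above comes from extraSpaceForMPR, indexed in
    // reverse (RenameWidth - i - 1) so that the most recently dispatched entries are
    // walked first.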

    XSInfo(io.commits.isCommit && io.commits.commitValid(i),
      "retired pc %x wen %d ldest %d pdest %x old_pdest %x data %x fflags: %b\n",
      debug_microOp(deqPtrVec(i).value).cf.pc,
      io.commits.info(i).rfWen,
      io.commits.info(i).ldest,
      io.commits.info(i).pdest,
      io.commits.info(i).old_pdest,
      debug_exuData(deqPtrVec(i).value),
      fflagsDataRead(i)
    )
    XSInfo(state === s_walk && io.commits.walkValid(i), "walked pc %x wen %d ldst %d data %x\n",
      debug_microOp(walkPtrVec(i).value).cf.pc,
      io.commits.info(i).rfWen,
      io.commits.info(i).ldest,
      debug_exuData(walkPtrVec(i).value)
    )
    XSInfo(state === s_extrawalk && io.commits.walkValid(i), "use extra space walked wen %d ldst %d\n",
      io.commits.info(i).rfWen,
      io.commits.info(i).ldest
    )
  }
  if (env.EnableDifftest) {
    io.commits.info.map(info => dontTouch(info.pc))
  }

  // sync fflags/dirty_fs to csr
  io.csr.fflags := RegNext(fflags)
  io.csr.dirty_fs := RegNext(dirty_fs)

  // commit load/store to lsq
  val ldCommitVec = VecInit((0 until CommitWidth).map(i => io.commits.commitValid(i) && io.commits.info(i).commitType === CommitType.LOAD))
  val stCommitVec = VecInit((0 until CommitWidth).map(i => io.commits.commitValid(i) && io.commits.info(i).commitType === CommitType.STORE))
  io.lsq.lcommit := RegNext(Mux(io.commits.isCommit, PopCount(ldCommitVec), 0.U))
  io.lsq.scommit := RegNext(Mux(io.commits.isCommit, PopCount(stCommitVec), 0.U))
  // indicate a pending load or store
  io.lsq.pendingld := RegNext(io.commits.isCommit && io.commits.info(0).commitType === CommitType.LOAD && valid(deqPtr.value))
  io.lsq.pendingst := RegNext(io.commits.isCommit && io.commits.info(0).commitType === CommitType.STORE && valid(deqPtr.value))
  io.lsq.commit := RegNext(io.commits.isCommit && io.commits.commitValid(0))

  /**
    * state changes
    * (1) exceptions: when an exception occurs, cancel all and switch to s_idle
    * (2) redirect: switch to s_walk or s_extrawalk (depending on whether there are pending instructions in dispatch1)
    * (3) walk: when walking reaches the end, switch to s_idle
    * (4) s_extrawalk to s_walk
    */
  // state === s_idle: don't change when walk_no_need
  // state === s_walk: don't change when walk_no_need && walkFinished
  // state === s_extrawalk: always continue to walk (because walk_no_need cannot happen there)
  val zeroWalkDistance = enqPtr - 1.U === io.redirect.bits.robIdx && !io.redirect.bits.flushItself()
  val noNeedToWalk = zeroWalkDistance && (state === s_idle || (state === s_walk && walkFinished))
  // update the state depending on whether there is a redirect
  val state_next = Mux(io.redirect.valid,
    Mux(io.enq.needAlloc.asUInt.orR,
      s_extrawalk,
      Mux(noNeedToWalk, s_idle, s_walk)
    ),
    Mux(state === s_walk && walkFinished,
      s_idle,
      Mux(state === s_extrawalk,
        // if no more walk, switch to s_idle
        Mux(walkCounter === 0.U, s_idle, s_walk),
        state
      )
    )
  )
  state := state_next

  /**
    * pointers and counters
    */
  val deqPtrGenModule = Module(new RobDeqPtrWrapper)
  deqPtrGenModule.io.state := state
  deqPtrGenModule.io.deq_v := commit_v
  deqPtrGenModule.io.deq_w := commit_w
  deqPtrGenModule.io.exception_state := exceptionDataRead
  deqPtrGenModule.io.intrBitSetReg := intrBitSetReg
  deqPtrGenModule.io.hasNoSpecExec := hasNoSpecExec
  deqPtrGenModule.io.interrupt_safe := interrupt_safe(deqPtr.value)
  deqPtrGenModule.io.blockCommit := blockCommit
  deqPtrVec := deqPtrGenModule.io.out
  val deqPtrVec_next = deqPtrGenModule.io.next_out

  val enqPtrGenModule = Module(new RobEnqPtrWrapper)
  enqPtrGenModule.io.redirect := io.redirect
  enqPtrGenModule.io.allowEnqueue := allowEnqueue
  enqPtrGenModule.io.hasBlockBackward := hasBlockBackward
  enqPtrGenModule.io.enq := VecInit(io.enq.req.map(_.valid))
  enqPtrVec := enqPtrGenModule.io.out

  val thisCycleWalkCount = Mux(walkFinished, walkCounter, CommitWidth.U)
  // next walkPtrVec:
  // (1) redirect occurs: update according to state
  // (2) walk: move backwards
  val walkPtrVec_next = Mux(io.redirect.valid && state =/= s_extrawalk,
    Mux(state === s_walk,
      VecInit(walkPtrVec.map(_ - thisCycleWalkCount)),
      VecInit((0 until CommitWidth).map(i => enqPtr - (i + 1).U))
    ),
    Mux(state === s_walk, VecInit(walkPtrVec.map(_ - CommitWidth.U)), walkPtrVec)
  )
  walkPtrVec := walkPtrVec_next

  val lastCycleRedirect = RegNext(io.redirect.valid)
  val trueValidCounter = Mux(lastCycleRedirect, distanceBetween(enqPtr, deqPtr), validCounter)
  val commitCnt = PopCount(io.commits.commitValid)
  validCounter := Mux(io.commits.isCommit,
    (validCounter - commitCnt) + dispatchNum,
    trueValidCounter
  )

  allowEnqueue := Mux(io.commits.isCommit,
    validCounter + dispatchNum <= (RobSize - RenameWidth).U,
    trueValidCounter <= (RobSize - RenameWidth).U
  )

  val currentWalkPtr = Mux(state === s_walk || state === s_extrawalk, walkPtr, enqPtr - 1.U)
  val redirectWalkDistance = distanceBetween(currentWalkPtr, io.redirect.bits.robIdx)
  when (io.redirect.valid) {
    walkCounter := Mux(state === s_walk,
      // NOTE: +& is used here because:
      // When the rob is full and the head instruction causes an exception,
      // the redirect robIdx is the deqPtr. In this case, currentWalkPtr is
      // enqPtr - 1.U and redirectWalkDistance is RobSize - 1.
      // Since exceptions flush the instruction itself, flushItself is true.B.
      // Previously we used `+` to count the walk distance, which overflowed
      // when RobSize is a power of 2. We changed it to `+&` so walkCounter can reach RobSize.
      // The width of walkCounter also needs to be extended accordingly.
      redirectWalkDistance - (thisCycleWalkCount - io.redirect.bits.flushItself()),
      redirectWalkDistance + io.redirect.bits.flushItself()
    )
    XSError(state === s_walk && thisCycleWalkCount < io.redirect.bits.flushItself(),
      p"walk distance error ($thisCycleWalkCount < ${io.redirect.bits.flushItself()})\n")
  }.elsewhen (state === s_walk) {
    walkCounter := walkCounter - thisCycleWalkCount
    XSInfo(p"rolling back: $enqPtr $deqPtr walk $walkPtr walkcnt $walkCounter\n")
  }

  /**
    * States
    * We put all the stage bits changes here.
    *
    * All events: (1) enqueue (dispatch); (2) writeback; (3) cancel; (4) dequeue (commit);
    * All states: (1) valid; (2) writebacked; (3) flagBkup
    */
  val commitReadAddr = Mux(state === s_idle, VecInit(deqPtrVec.map(_.value)), VecInit(walkPtrVec.map(_.value)))

  // enqueue logic writes up to RenameWidth valid bits
  for (i <- 0 until RenameWidth) {
    when (canEnqueue(i) && !io.redirect.valid) {
      valid(allocatePtrVec(i).value) := true.B
    }
  }
  // dequeue/walk logic writes up to CommitWidth valid bits; dequeue and walk will not happen at the same time
  for (i <- 0 until CommitWidth) {
    val commitValid = io.commits.isCommit && io.commits.commitValid(i)
    val walkValid = io.commits.isWalk && io.commits.walkValid(i) && state =/= s_extrawalk
    when (commitValid || walkValid) {
      valid(commitReadAddr(i)) := false.B
    }
  }
  // reset: when exception, reset all valid to false
  when (reset.asBool) {
    for (i <- 0 until RobSize) {
      valid(i) := false.B
    }
  }

  // status field: writebacked
  // enqueue logic sets up to RenameWidth writebacked bits to false
  for (i <- 0 until RenameWidth) {
    when (canEnqueue(i)) {
      val enqHasException = ExceptionNO.selectFrontend(io.enq.req(i).bits.cf.exceptionVec).asUInt.orR
      val enqHasTriggerHit = io.enq.req(i).bits.cf.trigger.getHitFrontend
      val enqIsWritebacked = io.enq.req(i).bits.eliminatedMove
      writebacked(allocatePtrVec(i).value) := enqIsWritebacked && !enqHasException && !enqHasTriggerHit
      val isStu = io.enq.req(i).bits.ctrl.fuType === FuType.stu
      store_data_writebacked(allocatePtrVec(i).value) := !isStu
    }
  }
  when (exceptionGen.io.out.valid) {
    val wbIdx = exceptionGen.io.out.bits.robIdx.value
    writebacked(wbIdx) := true.B
    store_data_writebacked(wbIdx) := true.B
  }
  // writeback logic sets numWbPorts writebacked bits to true
  for ((wb, cfgs) <- exuWriteback.zip(wbExuConfigs(exeWbSel))) {
    when (wb.valid) {
      val wbIdx = wb.bits.uop.robIdx.value
      val wbHasException = ExceptionNO.selectByExu(wb.bits.uop.cf.exceptionVec, cfgs).asUInt.orR
      val wbHasTriggerHit = wb.bits.uop.cf.trigger.getHitBackend
      val wbHasFlushPipe = cfgs.exists(_.flushPipe).B && wb.bits.uop.ctrl.flushPipe
      val wbHasReplayInst = cfgs.exists(_.replayInst).B && wb.bits.uop.ctrl.replayInst
      val block_wb = wbHasException || wbHasFlushPipe || wbHasReplayInst || wbHasTriggerHit
      writebacked(wbIdx) := !block_wb
    }
  }
  // store data writeback logic marks stores as data_writebacked
  for (wb <- stdWriteback) {
    when (RegNext(wb.valid)) {
      store_data_writebacked(RegNext(wb.bits.uop.robIdx.value)) := true.B
    }
  }

  // flagBkup
  // enqueue logic sets at most RenameWidth flagBkup bits
  for (i <- 0 until RenameWidth) {
    when (canEnqueue(i)) {
      flagBkup(allocatePtrVec(i).value) := allocatePtrVec(i).flag
    }
  }

  // interrupt_safe
  for (i <- 0 until RenameWidth) {
    // We RegNext the updates for better timing.
    // Note that instructions won't change the system's states in this cycle.
    when (RegNext(canEnqueue(i))) {
      // For now, we allow non-load-store instructions to trigger interrupts.
      // MMIO instructions should not trigger interrupts since they may
      // be sent to lower levels before they write back.
      // However, we cannot determine whether a load/store instruction is MMIO.
      // Thus, we don't allow load/store instructions to trigger an interrupt.
      // TODO: support non-MMIO load-store instructions to trigger interrupts
      val allow_interrupts = !CommitType.isLoadStore(io.enq.req(i).bits.ctrl.commitType)
      interrupt_safe(RegNext(allocatePtrVec(i).value)) := RegNext(allow_interrupts)
    }
  }

  /**
    * read and write of data modules
    */
  val commitReadAddr_next = Mux(state_next === s_idle,
    VecInit(deqPtrVec_next.map(_.value)),
    VecInit(walkPtrVec_next.map(_.value))
  )
  dispatchData.io.wen := canEnqueue
  dispatchData.io.waddr := allocatePtrVec.map(_.value)
  dispatchData.io.wdata.zip(io.enq.req.map(_.bits)).foreach{ case (wdata, req) =>
    wdata.ldest := req.ctrl.ldest
    wdata.rfWen := req.ctrl.rfWen
    wdata.fpWen := req.ctrl.fpWen
    wdata.wflags := req.ctrl.fpu.wflags
    wdata.commitType := req.ctrl.commitType
    wdata.pdest := req.pdest
    wdata.old_pdest := req.old_pdest
    wdata.ftqIdx := req.cf.ftqPtr
    wdata.ftqOffset := req.cf.ftqOffset
    wdata.pc := req.cf.pc
  }
  dispatchData.io.raddr := commitReadAddr_next

  exceptionGen.io.redirect <> io.redirect
  exceptionGen.io.flush := io.flushOut.valid
  for (i <- 0 until RenameWidth) {
    exceptionGen.io.enq(i).valid := canEnqueue(i)
    exceptionGen.io.enq(i).bits.robIdx := io.enq.req(i).bits.robIdx
    exceptionGen.io.enq(i).bits.exceptionVec := ExceptionNO.selectFrontend(io.enq.req(i).bits.cf.exceptionVec)
    exceptionGen.io.enq(i).bits.flushPipe := io.enq.req(i).bits.ctrl.flushPipe
    exceptionGen.io.enq(i).bits.replayInst := false.B
    XSError(canEnqueue(i) && io.enq.req(i).bits.ctrl.replayInst, "enq should not set replayInst")
    exceptionGen.io.enq(i).bits.singleStep := io.enq.req(i).bits.ctrl.singleStep
    exceptionGen.io.enq(i).bits.crossPageIPFFix := io.enq.req(i).bits.cf.crossPageIPFFix
    exceptionGen.io.enq(i).bits.trigger.clear()
    exceptionGen.io.enq(i).bits.trigger.frontendHit := io.enq.req(i).bits.cf.trigger.frontendHit
  }

  println(s"ExceptionGen:")
  val exceptionCases = exceptionPorts.map(_._1.flatMap(_.exceptionOut).distinct.sorted)
  require(exceptionCases.length == exceptionGen.io.wb.length)
  for ((((configs, wb), exc_wb), i) <- exceptionPorts.zip(exceptionGen.io.wb).zipWithIndex) {
    exc_wb.valid := wb.valid
    exc_wb.bits.robIdx := wb.bits.uop.robIdx
    exc_wb.bits.exceptionVec := ExceptionNO.selectByExu(wb.bits.uop.cf.exceptionVec, configs)
    exc_wb.bits.flushPipe := configs.exists(_.flushPipe).B && wb.bits.uop.ctrl.flushPipe
    exc_wb.bits.replayInst := configs.exists(_.replayInst).B && wb.bits.uop.ctrl.replayInst
    exc_wb.bits.singleStep := false.B
    exc_wb.bits.crossPageIPFFix := false.B
    // TODO: make trigger configurable
    exc_wb.bits.trigger.clear()
    exc_wb.bits.trigger.backendHit := wb.bits.uop.cf.trigger.backendHit
    println(s" [$i] ${configs.map(_.name)}: exception ${exceptionCases(i)}, " +
      s"flushPipe ${configs.exists(_.flushPipe)}, " +
      s"replayInst ${configs.exists(_.replayInst)}")
  }

  val fflags_wb = fflagsPorts.map(_._2)
  val fflagsDataModule = Module(new SyncDataModuleTemplate(
    UInt(5.W), RobSize, CommitWidth, fflags_wb.size)
  )
  for (i <- fflags_wb.indices) {
    fflagsDataModule.io.wen  (i) := fflags_wb(i).valid
    fflagsDataModule.io.waddr(i) := fflags_wb(i).bits.uop.robIdx.value
    fflagsDataModule.io.wdata(i) := fflags_wb(i).bits.fflags
  }
  fflagsDataModule.io.raddr := VecInit(deqPtrVec_next.map(_.value))
  fflagsDataRead := fflagsDataModule.io.rdata
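  // fflags written back by the FP units are collected per write-back port in the data
  // module above and read with the next-cycle dequeue pointers, so the flag values are
  // available in the cycle the corresponding instructions commit.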

  val instrCntReg = RegInit(0.U(64.W))
  val fuseCommitCnt = PopCount(io.commits.commitValid.zip(io.commits.info).map{ case (v, i) => RegNext(v && CommitType.isFused(i.commitType)) })
  val trueCommitCnt = RegNext(commitCnt) +& fuseCommitCnt
  val retireCounter = Mux(RegNext(io.commits.isCommit), trueCommitCnt, 0.U)
  val instrCnt = instrCntReg + retireCounter
  instrCntReg := instrCnt
  io.csr.perfinfo.retiredInstr := retireCounter
  io.robFull := !allowEnqueue

  /**
    * debug info
    */
  XSDebug(p"enqPtr ${enqPtr} deqPtr ${deqPtr}\n")
  XSDebug("")
  for (i <- 0 until RobSize) {
    XSDebug(false, !valid(i), "-")
    XSDebug(false, valid(i) && writebacked(i), "w")
    XSDebug(false, valid(i) && !writebacked(i), "v")
  }
  XSDebug(false, true.B, "\n")

  for (i <- 0 until RobSize) {
    if (i % 4 == 0) XSDebug("")
    XSDebug(false, true.B, "%x ", debug_microOp(i).cf.pc)
    XSDebug(false, !valid(i), "- ")
    XSDebug(false, valid(i) && writebacked(i), "w ")
    XSDebug(false, valid(i) && !writebacked(i), "v ")
    if (i % 4 == 3) XSDebug(false, true.B, "\n")
  }

  def ifCommit(counter: UInt): UInt = Mux(io.commits.isCommit, counter, 0.U)

  val commitDebugUop = deqPtrVec.map(_.value).map(debug_microOp(_))
  XSPerfAccumulate("clock_cycle", 1.U)
  QueuePerf(RobSize, PopCount((0 until RobSize).map(valid(_))), !allowEnqueue)
  XSPerfAccumulate("commitUop", ifCommit(commitCnt))
  XSPerfAccumulate("commitInstr", ifCommit(trueCommitCnt))
  val commitIsMove = commitDebugUop.map(_.ctrl.isMove)
  XSPerfAccumulate("commitInstrMove", ifCommit(PopCount(io.commits.commitValid.zip(commitIsMove).map{ case (v, m) => v && m })))
  val commitMoveElim = commitDebugUop.map(_.debugInfo.eliminatedMove)
  XSPerfAccumulate("commitInstrMoveElim", ifCommit(PopCount(io.commits.commitValid zip commitMoveElim map { case (v, e) => v && e })))
  XSPerfAccumulate("commitInstrFused", ifCommit(fuseCommitCnt))
  val commitIsLoad = io.commits.info.map(_.commitType).map(_ === CommitType.LOAD)
  val commitLoadValid = io.commits.commitValid.zip(commitIsLoad).map{ case (v, t) => v && t }
  XSPerfAccumulate("commitInstrLoad", ifCommit(PopCount(commitLoadValid)))
  val commitIsBranch = io.commits.info.map(_.commitType).map(_ === CommitType.BRANCH)
  val commitBranchValid = io.commits.commitValid.zip(commitIsBranch).map{ case (v, t) => v && t }
  XSPerfAccumulate("commitInstrBranch", ifCommit(PopCount(commitBranchValid)))
  val commitLoadWaitBit = commitDebugUop.map(_.cf.loadWaitBit)
  XSPerfAccumulate("commitInstrLoadWait", ifCommit(PopCount(commitLoadValid.zip(commitLoadWaitBit).map{ case (v, w) => v && w })))
  val commitIsStore = io.commits.info.map(_.commitType).map(_ === CommitType.STORE)
  XSPerfAccumulate("commitInstrStore", ifCommit(PopCount(io.commits.commitValid.zip(commitIsStore).map{ case (v, t) => v && t })))
  XSPerfAccumulate("writeback", PopCount((0 until RobSize).map(i => valid(i) && writebacked(i))))
  // XSPerfAccumulate("enqInstr", PopCount(io.dp1Req.map(_.fire)))
  // XSPerfAccumulate("d2rVnR", PopCount(io.dp1Req.map(p => p.valid && !p.ready)))
  XSPerfAccumulate("walkInstr", Mux(io.commits.isWalk, PopCount(io.commits.walkValid), 0.U))
  XSPerfAccumulate("walkCycle", state === s_walk || state === s_extrawalk)
  val deqNotWritebacked = valid(deqPtr.value) && !writebacked(deqPtr.value)
  val deqUopCommitType = io.commits.info(0).commitType
  XSPerfAccumulate("waitNormalCycle", deqNotWritebacked && deqUopCommitType === CommitType.NORMAL)
  XSPerfAccumulate("waitBranchCycle", deqNotWritebacked && deqUopCommitType === CommitType.BRANCH)
  XSPerfAccumulate("waitLoadCycle", deqNotWritebacked && deqUopCommitType === CommitType.LOAD)
  XSPerfAccumulate("waitStoreCycle", deqNotWritebacked && deqUopCommitType === CommitType.STORE)
  XSPerfAccumulate("robHeadPC", io.commits.info(0).pc)
  val dispatchLatency = commitDebugUop.map(uop => uop.debugInfo.dispatchTime - uop.debugInfo.renameTime)
  val enqRsLatency = commitDebugUop.map(uop => uop.debugInfo.enqRsTime - uop.debugInfo.dispatchTime)
  val selectLatency = commitDebugUop.map(uop => uop.debugInfo.selectTime - uop.debugInfo.enqRsTime)
  val issueLatency = commitDebugUop.map(uop => uop.debugInfo.issueTime - uop.debugInfo.selectTime)
  val executeLatency = commitDebugUop.map(uop => uop.debugInfo.writebackTime - uop.debugInfo.issueTime)
  val rsFuLatency = commitDebugUop.map(uop => uop.debugInfo.writebackTime - uop.debugInfo.enqRsTime)
  val commitLatency = commitDebugUop.map(uop => timer - uop.debugInfo.writebackTime)
  def latencySum(cond: Seq[Bool], latency: Seq[UInt]): UInt = {
    cond.zip(latency).map(x => Mux(x._1, x._2, 0.U)).reduce(_ +& _)
  }
  for (fuType <- FuType.functionNameMap.keys) {
    val fuName = FuType.functionNameMap(fuType)
    val commitIsFuType = io.commits.commitValid.zip(commitDebugUop).map(x => x._1 && x._2.ctrl.fuType === fuType.U)
    XSPerfAccumulate(s"${fuName}_instr_cnt", ifCommit(PopCount(commitIsFuType)))
    XSPerfAccumulate(s"${fuName}_latency_dispatch", ifCommit(latencySum(commitIsFuType, dispatchLatency)))
    XSPerfAccumulate(s"${fuName}_latency_enq_rs", ifCommit(latencySum(commitIsFuType, enqRsLatency)))
    XSPerfAccumulate(s"${fuName}_latency_select", ifCommit(latencySum(commitIsFuType, selectLatency)))
    XSPerfAccumulate(s"${fuName}_latency_issue", ifCommit(latencySum(commitIsFuType, issueLatency)))
    XSPerfAccumulate(s"${fuName}_latency_execute", ifCommit(latencySum(commitIsFuType, executeLatency)))
    XSPerfAccumulate(s"${fuName}_latency_enq_rs_execute", ifCommit(latencySum(commitIsFuType, rsFuLatency)))
    XSPerfAccumulate(s"${fuName}_latency_commit", ifCommit(latencySum(commitIsFuType, commitLatency)))
    if (fuType == FuType.fmac.litValue) {
      val commitIsFma = commitIsFuType.zip(commitDebugUop).map(x => x._1 && x._2.ctrl.fpu.ren3)
      XSPerfAccumulate(s"${fuName}_instr_cnt_fma", ifCommit(PopCount(commitIsFma)))
      XSPerfAccumulate(s"${fuName}_latency_enq_rs_execute_fma", ifCommit(latencySum(commitIsFma, rsFuLatency)))
      XSPerfAccumulate(s"${fuName}_latency_execute_fma", ifCommit(latencySum(commitIsFma, executeLatency)))
    }
  }

  // difftest signals
  val firstValidCommit = (deqPtr + PriorityMux(io.commits.commitValid, VecInit(List.tabulate(CommitWidth)(_.U)))).value

  val wdata = Wire(Vec(CommitWidth, UInt(XLEN.W)))
  val wpc = Wire(Vec(CommitWidth, UInt(XLEN.W)))

  for (i <- 0 until CommitWidth) {
    val idx = deqPtrVec(i).value
    wdata(i) := debug_exuData(idx)
    wpc(i) := SignExt(commitDebugUop(i).cf.pc, XLEN)
  }

  if (env.EnableDifftest) {
    for (i <- 0 until CommitWidth) {
      val difftest = Module(new DifftestInstrCommit)
      difftest.io.clock := clock
      difftest.io.coreid := io.hartId
      difftest.io.index := i.U

      val ptr = deqPtrVec(i).value
      val uop = commitDebugUop(i)
      val exuOut = debug_exuDebug(ptr)
      val exuData = debug_exuData(ptr)
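      // Commit information is delayed by three cycles (triple RegNext) before being
      // handed to difftest.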
      difftest.io.valid := RegNext(RegNext(RegNext(io.commits.commitValid(i) && io.commits.isCommit)))
      difftest.io.pc := RegNext(RegNext(RegNext(SignExt(uop.cf.pc, XLEN))))
      difftest.io.instr := RegNext(RegNext(RegNext(uop.cf.instr)))
      difftest.io.special := RegNext(RegNext(RegNext(CommitType.isFused(io.commits.info(i).commitType))))
      // when committing an eliminated move instruction,
      // we must make sure that skip is properly set to false (the output from the EXU is a random value)
      difftest.io.skip := RegNext(RegNext(RegNext(Mux(uop.eliminatedMove, false.B, exuOut.isMMIO || exuOut.isPerfCnt))))
      difftest.io.isRVC := RegNext(RegNext(RegNext(uop.cf.pd.isRVC)))
      difftest.io.rfwen := RegNext(RegNext(RegNext(io.commits.commitValid(i) && io.commits.info(i).rfWen && io.commits.info(i).ldest =/= 0.U)))
      difftest.io.fpwen := RegNext(RegNext(RegNext(io.commits.commitValid(i) && io.commits.info(i).fpWen)))
      difftest.io.wpdest := RegNext(RegNext(RegNext(io.commits.info(i).pdest)))
      difftest.io.wdest := RegNext(RegNext(RegNext(io.commits.info(i).ldest)))

      // // runahead commit hint
      // val runahead_commit = Module(new DifftestRunaheadCommitEvent)
      // runahead_commit.io.clock := clock
      // runahead_commit.io.coreid := io.hartId
      // runahead_commit.io.index := i.U
      // runahead_commit.io.valid := difftest.io.valid &&
      //   (commitBranchValid(i) || commitIsStore(i))
      // // TODO: is branch or store
      // runahead_commit.io.pc := difftest.io.pc
    }
  }
  else if (env.AlwaysBasicDiff) {
    // These structures are used by difftest only and should be optimized out by synthesis.
    val dt_eliminatedMove = Mem(RobSize, Bool())
    val dt_isRVC = Mem(RobSize, Bool())
    val dt_exuDebug = Reg(Vec(RobSize, new DebugBundle))
    for (i <- 0 until RenameWidth) {
      when (canEnqueue(i)) {
        dt_eliminatedMove(allocatePtrVec(i).value) := io.enq.req(i).bits.eliminatedMove
        dt_isRVC(allocatePtrVec(i).value) := io.enq.req(i).bits.cf.pd.isRVC
      }
    }
    for (wb <- exuWriteback) {
      when (wb.valid) {
        val wbIdx = wb.bits.uop.robIdx.value
        dt_exuDebug(wbIdx) := wb.bits.debug
      }
    }
    // Always instantiate basic difftest modules.
    for (i <- 0 until CommitWidth) {
      val commitInfo = io.commits.info(i)
      val ptr = deqPtrVec(i).value
      val exuOut = dt_exuDebug(ptr)
      val eliminatedMove = dt_eliminatedMove(ptr)
      val isRVC = dt_isRVC(ptr)

      val difftest = Module(new DifftestBasicInstrCommit)
      difftest.io.clock := clock
      difftest.io.coreid := io.hartId
      difftest.io.index := i.U
      difftest.io.valid := RegNext(RegNext(RegNext(io.commits.commitValid(i) && io.commits.isCommit)))
      difftest.io.special := RegNext(RegNext(RegNext(CommitType.isFused(commitInfo.commitType))))
      difftest.io.skip := RegNext(RegNext(RegNext(Mux(eliminatedMove, false.B, exuOut.isMMIO || exuOut.isPerfCnt))))
      difftest.io.isRVC := RegNext(RegNext(RegNext(isRVC)))
      difftest.io.rfwen := RegNext(RegNext(RegNext(io.commits.commitValid(i) && commitInfo.rfWen && commitInfo.ldest =/= 0.U)))
      difftest.io.fpwen := RegNext(RegNext(RegNext(io.commits.commitValid(i) && commitInfo.fpWen)))
      difftest.io.wpdest := RegNext(RegNext(RegNext(commitInfo.pdest)))
      difftest.io.wdest := RegNext(RegNext(RegNext(commitInfo.ldest)))
    }
  }

  if (env.EnableDifftest) {
    for (i <- 0 until CommitWidth) {
      val difftest = Module(new DifftestLoadEvent)
      difftest.io.clock := clock
      difftest.io.coreid := io.hartId
      difftest.io.index := i.U

      val ptr = deqPtrVec(i).value
      val uop = commitDebugUop(i)
      val exuOut = debug_exuDebug(ptr)
      difftest.io.valid := RegNext(RegNext(RegNext(io.commits.commitValid(i) && io.commits.isCommit)))
      difftest.io.paddr := RegNext(RegNext(RegNext(exuOut.paddr)))
      difftest.io.opType := RegNext(RegNext(RegNext(uop.ctrl.fuOpType)))
      difftest.io.fuType := RegNext(RegNext(RegNext(uop.ctrl.fuType)))
    }
  }

  // Trap events for difftest.
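  // A trap fires when a committed instruction is marked isXSTrap. The cycle and retired
  // instruction counters are reported to the simulation environment; with full difftest
  // enabled, the trap code (taken from the instruction's write-back data) and its PC are
  // reported as well.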
  if (env.EnableDifftest) {
    val dt_isXSTrap = Mem(RobSize, Bool())
    for (i <- 0 until RenameWidth) {
      when (canEnqueue(i)) {
        dt_isXSTrap(allocatePtrVec(i).value) := io.enq.req(i).bits.ctrl.isXSTrap
      }
    }
    val trapVec = io.commits.commitValid.zip(deqPtrVec).map{ case (v, d) => io.commits.isCommit && v && dt_isXSTrap(d.value) }
    val hitTrap = trapVec.reduce(_||_)
    val trapCode = PriorityMux(wdata.zip(trapVec).map(x => x._2 -> x._1))
    val trapPC = SignExt(PriorityMux(wpc.zip(trapVec).map(x => x._2 -> x._1)), XLEN)
    val difftest = Module(new DifftestTrapEvent)
    difftest.io.clock := clock
    difftest.io.coreid := io.hartId
    difftest.io.valid := hitTrap
    difftest.io.code := trapCode
    difftest.io.pc := trapPC
    difftest.io.cycleCnt := timer
    difftest.io.instrCnt := instrCnt
    difftest.io.hasWFI := hasWFI
  }
  else if (env.AlwaysBasicDiff) {
    val dt_isXSTrap = Mem(RobSize, Bool())
    for (i <- 0 until RenameWidth) {
      when (canEnqueue(i)) {
        dt_isXSTrap(allocatePtrVec(i).value) := io.enq.req(i).bits.ctrl.isXSTrap
      }
    }
    val trapVec = io.commits.commitValid.zip(deqPtrVec).map{ case (v, d) => io.commits.isCommit && v && dt_isXSTrap(d.value) }
    val hitTrap = trapVec.reduce(_||_)
    val difftest = Module(new DifftestBasicTrapEvent)
    difftest.io.clock := clock
    difftest.io.coreid := io.hartId
    difftest.io.valid := hitTrap
    difftest.io.cycleCnt := timer
    difftest.io.instrCnt := instrCnt
  }

  val perfEvents = Seq(
    ("rob_interrupt_num       ", io.flushOut.valid && intrEnable),
    ("rob_exception_num       ", io.flushOut.valid && exceptionEnable),
    ("rob_flush_pipe_num      ", io.flushOut.valid && isFlushPipe),
    ("rob_replay_inst_num     ", io.flushOut.valid && isFlushPipe && deqHasReplayInst),
    ("rob_commitUop           ", ifCommit(commitCnt)),
    ("rob_commitInstr         ", ifCommit(trueCommitCnt)),
    ("rob_commitInstrMove     ", ifCommit(PopCount(io.commits.commitValid.zip(commitIsMove).map{ case (v, m) => v && m }))),
    ("rob_commitInstrFused    ", ifCommit(fuseCommitCnt)),
    ("rob_commitInstrLoad     ", ifCommit(PopCount(commitLoadValid))),
    ("rob_commitInstrBranch   ", ifCommit(PopCount(commitBranchValid))),
    ("rob_commitInstrLoadWait ", ifCommit(PopCount(commitLoadValid.zip(commitLoadWaitBit).map{ case (v, w) => v && w }))),
    ("rob_commitInstrStore    ", ifCommit(PopCount(io.commits.commitValid.zip(commitIsStore).map{ case (v, t) => v && t }))),
    ("rob_walkInstr           ", Mux(io.commits.isWalk, PopCount(io.commits.walkValid), 0.U)),
    ("rob_walkCycle           ", (state === s_walk || state === s_extrawalk)),
    ("rob_1_4_valid           ", (PopCount((0 until RobSize).map(valid(_))) < (RobSize.U/4.U))),
    ("rob_2_4_valid           ", (PopCount((0 until RobSize).map(valid(_))) > (RobSize.U/4.U)) & (PopCount((0 until RobSize).map(valid(_))) <= (RobSize.U/2.U))),
    ("rob_3_4_valid           ", (PopCount((0 until RobSize).map(valid(_))) > (RobSize.U/2.U)) & (PopCount((0 until RobSize).map(valid(_))) <= (RobSize.U*3.U/4.U))),
    ("rob_4_4_valid           ", (PopCount((0 until RobSize).map(valid(_))) > (RobSize.U*3.U/4.U))),
  )
  generatePerfEvent()
}