// xref: /XiangShan/src/main/scala/xiangshan/backend/CtrlBlock.scala (revision 37d77575ecbc45c170c8e9b1aed0e82f21bb4696)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.backend

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
import utility._
import utils._
import xiangshan.ExceptionNO._
import xiangshan._
import xiangshan.backend.Bundles.{DecodedInst, DynInst, ExceptionInfo, ExuOutput}
import xiangshan.backend.ctrlblock.{DebugLSIO, DebugLsInfoBundle, LsTopdownInfo, MemCtrl, RedirectGenerator}
import xiangshan.backend.datapath.DataConfig.VAddrData
import xiangshan.backend.decode.{DecodeStage, FusionDecoder}
import xiangshan.backend.dispatch.{CoreDispatchTopDownIO, Dispatch, DispatchQueue}
import xiangshan.backend.fu.PFEvent
import xiangshan.backend.fu.vector.Bundles.VType
import xiangshan.backend.rename.{Rename, RenameTableWrapper, SnapshotGenerator}
import xiangshan.backend.rob.{Rob, RobCSRIO, RobCoreTopDownIO, RobDebugRollingIO, RobLsqIO, RobPtr}
import xiangshan.frontend.{FtqPtr, FtqRead, Ftq_RF_Components}
import xiangshan.mem.{LqPtr, LsqEnqIO}

class CtrlToFtqIO(implicit p: Parameters) extends XSBundle {
  val rob_commits = Vec(CommitWidth, Valid(new RobCommitInfo))
  val redirect = Valid(new Redirect)
  val ftqIdxAhead = Vec(BackendRedirectNum, Valid(new FtqPtr))
  val ftqIdxSelOH = Valid(UInt((BackendRedirectNum).W))
}

class CtrlBlock(params: BackendParams)(implicit p: Parameters) extends LazyModule {
  override def shouldBeInlined: Boolean = false

  val rob = LazyModule(new Rob(params))

  lazy val module = new CtrlBlockImp(this)(p, params)

}

class CtrlBlockImp(
  override val wrapper: CtrlBlock
)(implicit
  p: Parameters,
  params: BackendParams
) extends LazyModuleImp(wrapper)
  with HasXSParameter
  with HasCircularQueuePtrHelper
  with HasPerfEvents
{
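  // Partition the read ports of the backend PC memory (pcMem) into named, contiguous index
  // ranges: EXU PC reads, redirect, memory dependence prediction, ROB flush, and
  // load/hybrid/store(-prefetch) PC reads.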
  val pcMemRdIndexes = new NamedIndexes(Seq(
    "exu"       -> params.numPcReadPort,
    "redirect"  -> 1,
    "memPred"   -> 1,
    "robFlush"  -> 1,
    "load"      -> params.LduCnt,
    "hybrid"    -> params.HyuCnt,
    "store"     -> (if(EnableStorePrefetchSMS) params.StaCnt else 0)
  ))

  private val numPcMemReadForExu = params.numPcReadPort
  private val numPcMemRead = pcMemRdIndexes.maxIdx

  println(s"pcMem read num: $numPcMemRead")
  println(s"pcMem read num for exu: $numPcMemReadForExu")

  val io = IO(new CtrlBlockIO())

  val decode = Module(new DecodeStage)
  val fusionDecoder = Module(new FusionDecoder)
  val rat = Module(new RenameTableWrapper)
  val rename = Module(new Rename)
  val dispatch = Module(new Dispatch)
  val intDq = Module(new DispatchQueue(dpParams.IntDqSize, RenameWidth, dpParams.IntDqDeqWidth))
  val fpDq = Module(new DispatchQueue(dpParams.FpDqSize, RenameWidth, dpParams.FpDqDeqWidth))
  val lsDq = Module(new DispatchQueue(dpParams.LsDqSize, RenameWidth, dpParams.LsDqDeqWidth))
  val redirectGen = Module(new RedirectGenerator)
  private val pcMem = Module(new SyncDataModuleTemplate(new Ftq_RF_Components, FtqSize, numPcMemRead, 1, "BackendPC"))
  private val rob = wrapper.rob.module
  private val memCtrl = Module(new MemCtrl(params))

  private val disableFusion = decode.io.csrCtrl.singlestep || !decode.io.csrCtrl.fusion_enable

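  // Flush requests from the ROB (e.g. pipeline-flush or replayed instructions).
  // s0 is the raw request; s1 registers it for one cycle and is used to read the flushed PC
  // from pcMem and to select the final redirect.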
  private val s0_robFlushRedirect = rob.io.flushOut
  private val s1_robFlushRedirect = Wire(Valid(new Redirect))
  s1_robFlushRedirect.valid := RegNext(s0_robFlushRedirect.valid, false.B)
  s1_robFlushRedirect.bits := RegEnable(s0_robFlushRedirect.bits, s0_robFlushRedirect.valid)

  pcMem.io.raddr(pcMemRdIndexes("robFlush").head) := s0_robFlushRedirect.bits.ftqIdx.value
  private val s1_robFlushPc = pcMem.io.rdata(pcMemRdIndexes("robFlush").head).getPc(RegEnable(s0_robFlushRedirect.bits.ftqOffset, s0_robFlushRedirect.valid))
  private val s3_redirectGen = redirectGen.io.stage2Redirect
  private val s1_s3_redirect = Mux(s1_robFlushRedirect.valid, s1_robFlushRedirect, s3_redirectGen)
  private val s2_s4_pendingRedirectValid = RegInit(false.B)
  when (s1_s3_redirect.valid) {
    s2_s4_pendingRedirectValid := true.B
  }.elsewhen (RegNext(io.frontend.toFtq.redirect.valid)) {
    s2_s4_pendingRedirectValid := false.B
  }

  // Redirect will be RegNext at ExuBlocks and IssueBlocks
  val s2_s4_redirect = RegNextWithEnable(s1_s3_redirect)
  val s3_s5_redirect = RegNextWithEnable(s2_s4_redirect)

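  // Register each write-back result for one cycle and drop results whose robIdx is flushed by
  // an in-flight redirect, so the ROB only receives write-backs that survive the flush.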
  private val delayedNotFlushedWriteBack = io.fromWB.wbData.map(x => {
    val valid = x.valid
    val killedByOlder = x.bits.robIdx.needFlush(Seq(s1_s3_redirect, s2_s4_redirect, s3_s5_redirect))
    val delayed = Wire(Valid(new ExuOutput(x.bits.params)))
    delayed.valid := RegNext(valid && !killedByOlder)
    delayed.bits := RegEnable(x.bits, x.valid)
    delayed.bits.debugInfo.writebackTime := GTimer()
    delayed
  }).toSeq

  val wbDataNoStd = io.fromWB.wbData.filter(!_.bits.params.hasStdFu)
  private val delayedNotFlushedWriteBackNums = wbDataNoStd.map(x => {
    val valid = x.valid
    val killedByOlder = x.bits.robIdx.needFlush(Seq(s1_s3_redirect, s2_s4_redirect, s3_s5_redirect))
    val delayed = Wire(Valid(UInt(io.fromWB.wbData.size.U.getWidth.W)))
    delayed.valid := RegNext(valid && !killedByOlder)
    val sameRobidxBools = VecInit(wbDataNoStd.map( wb => {
      val killedByOlderThat = wb.bits.robIdx.needFlush(Seq(s1_s3_redirect, s2_s4_redirect, s3_s5_redirect))
      (wb.bits.robIdx === x.bits.robIdx) && wb.valid && x.valid && !killedByOlderThat && !killedByOlder
    }).toSeq)
    dontTouch(sameRobidxBools)
    delayed.bits := RegNext(PopCount(sameRobidxBools))
    delayed
  }).toSeq

  private val exuPredecode = VecInit(
    delayedNotFlushedWriteBack.filter(_.bits.redirect.nonEmpty).map(x => x.bits.predecodeInfo.get).toSeq
  )

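  // Turn mispredicted branch/jump write-backs into redirect requests. The predecode info
  // collected above (exuPredecode) travels alongside and is only meaningful while the
  // corresponding redirect is valid.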
  private val exuRedirects: Seq[ValidIO[Redirect]] = delayedNotFlushedWriteBack.filter(_.bits.redirect.nonEmpty).map(x => {
    val out = Wire(Valid(new Redirect()))
    out.valid := x.valid && x.bits.redirect.get.valid && x.bits.redirect.get.bits.cfiUpdate.isMisPred
    out.bits := x.bits.redirect.get.bits
    out.bits.debugIsCtrl := true.B
    out.bits.debugIsMemVio := false.B
    out
  }).toSeq

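  // A memory-order violation reported by the memory block becomes a load-replay redirect one
  // cycle later, unless an older in-flight redirect already flushes it.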
  private val memViolation = io.fromMem.violation
  val loadReplay = Wire(ValidIO(new Redirect))
  loadReplay.valid := RegNext(memViolation.valid &&
    !memViolation.bits.robIdx.needFlush(Seq(s1_s3_redirect, s2_s4_redirect))
  )
  loadReplay.bits := RegEnable(memViolation.bits, memViolation.valid)
  loadReplay.bits.debugIsCtrl := false.B
  loadReplay.bits.debugIsMemVio := true.B

  val pdestReverse = rob.io.commits.info.map(info => info.pdest).reverse

  pcMem.io.raddr(pcMemRdIndexes("redirect").head) := redirectGen.io.redirectPcRead.ptr.value
  redirectGen.io.redirectPcRead.data := pcMem.io.rdata(pcMemRdIndexes("redirect").head).getPc(RegNext(redirectGen.io.redirectPcRead.offset))
  pcMem.io.raddr(pcMemRdIndexes("memPred").head) := redirectGen.io.memPredPcRead.ptr.value
  redirectGen.io.memPredPcRead.data := pcMem.io.rdata(pcMemRdIndexes("memPred").head).getPc(RegNext(redirectGen.io.memPredPcRead.offset))

  for ((pcMemIdx, i) <- pcMemRdIndexes("load").zipWithIndex) {
    // load read pcMem (s0) -> get rdata (s1) -> reg next in Memblock (s2) -> reg next in Memblock (s3) -> consumed by pf (s3)
    pcMem.io.raddr(pcMemIdx) := io.memLdPcRead(i).ptr.value
    io.memLdPcRead(i).data := pcMem.io.rdata(pcMemIdx).getPc(RegNext(io.memLdPcRead(i).offset))
  }

  for ((pcMemIdx, i) <- pcMemRdIndexes("hybrid").zipWithIndex) {
    // hybrid read pcMem (s0) -> get rdata (s1) -> reg next in Memblock (s2) -> reg next in Memblock (s3) -> consumed by pf (s3)
    pcMem.io.raddr(pcMemIdx) := io.memHyPcRead(i).ptr.value
    io.memHyPcRead(i).data := pcMem.io.rdata(pcMemIdx).getPc(RegNext(io.memHyPcRead(i).offset))
  }

  if (EnableStorePrefetchSMS) {
    for ((pcMemIdx, i) <- pcMemRdIndexes("store").zipWithIndex) {
      pcMem.io.raddr(pcMemIdx) := io.memStPcRead(i).ptr.value
      io.memStPcRead(i).data := pcMem.io.rdata(pcMemIdx).getPc(RegNext(io.memStPcRead(i).offset))
    }
  } else {
    io.memStPcRead.foreach(_.data := 0.U)
  }

  redirectGen.io.hartId := io.fromTop.hartId
  redirectGen.io.exuRedirect := exuRedirects.toSeq
  redirectGen.io.exuOutPredecode := exuPredecode // guarded by exuRedirect.valid
  redirectGen.io.loadReplay <> loadReplay

  redirectGen.io.robFlush := s1_robFlushRedirect.valid

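  // Delay the ROB flush before presenting it to the frontend so that, when the redirect
  // reaches the frontend (T6 in the timeline below), the trap target from the CSR is already
  // available.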
  val s5_flushFromRobValidAhead = DelayN(s1_robFlushRedirect.valid, 4)
  val s6_flushFromRobValid = RegNext(s5_flushFromRobValidAhead)
  val frontendFlushBits = RegEnable(s1_robFlushRedirect.bits, s1_robFlushRedirect.valid) // ??
  // When the ROB commits an instruction that also flushes, notify the frontend of the flush but not the commit.
  // Flushes to the frontend may be delayed by several cycles; letting the commit arrive before the flush would cause errors.
  // Thus, all flush reasons behave the same as exceptions from the frontend's point of view.
  for (i <- 0 until CommitWidth) {
    // why flushOut: instructions with flushPipe are not committed to the frontend.
    // If we committed them, the frontend would see a flush after the commit, which it cannot accept.
    val s1_isCommit = rob.io.commits.commitValid(i) && rob.io.commits.isCommit && !s0_robFlushRedirect.valid
    io.frontend.toFtq.rob_commits(i).valid := RegNext(s1_isCommit)
    io.frontend.toFtq.rob_commits(i).bits := RegEnable(rob.io.commits.info(i), s1_isCommit)
  }
  io.frontend.toFtq.redirect.valid := s6_flushFromRobValid || s3_redirectGen.valid
  io.frontend.toFtq.redirect.bits := Mux(s6_flushFromRobValid, frontendFlushBits, s3_redirectGen.bits)
  io.frontend.toFtq.ftqIdxSelOH.valid := s6_flushFromRobValid || redirectGen.io.stage2Redirect.valid
  io.frontend.toFtq.ftqIdxSelOH.bits := Cat(s6_flushFromRobValid, redirectGen.io.stage2oldestOH & Fill(NumRedirect + 1, !s6_flushFromRobValid))

  //jmp/brh
  for (i <- 0 until NumRedirect) {
    io.frontend.toFtq.ftqIdxAhead(i).valid := exuRedirects(i).valid && exuRedirects(i).bits.cfiUpdate.isMisPred && !s1_robFlushRedirect.valid && !s5_flushFromRobValidAhead
    io.frontend.toFtq.ftqIdxAhead(i).bits := exuRedirects(i).bits.ftqIdx
  }
  //loadreplay
  io.frontend.toFtq.ftqIdxAhead(NumRedirect).valid := loadReplay.valid && !s1_robFlushRedirect.valid && !s5_flushFromRobValidAhead
  io.frontend.toFtq.ftqIdxAhead(NumRedirect).bits := loadReplay.bits.ftqIdx
  //exception
  io.frontend.toFtq.ftqIdxAhead.last.valid := s5_flushFromRobValidAhead
  io.frontend.toFtq.ftqIdxAhead.last.bits := frontendFlushBits.ftqIdx
  // Be careful here:
  // T0: rob.io.flushOut, s0_robFlushRedirect
  // T1: s1_robFlushRedirect, rob.io.exception.valid
  // T2: csr.redirect.valid
  // T3: csr.exception.valid
  // T4: csr.trapTarget
  // T5: ctrlBlock.trapTarget
  // T6: io.frontend.toFtq.stage2Redirect.valid
  val s2_robFlushPc = RegEnable(Mux(s1_robFlushRedirect.bits.flushItself(),
    s1_robFlushPc, // replay inst
    s1_robFlushPc + Mux(s1_robFlushRedirect.bits.isRVC, 2.U, 4.U) // flush pipe
  ), s1_robFlushRedirect.valid)
  private val s2_csrIsXRet = io.robio.csr.isXRet
  private val s5_csrIsTrap = DelayN(rob.io.exception.valid, 4)
  private val s2_s5_trapTargetFromCsr = io.robio.csr.trapTarget

  val flushTarget = Mux(s2_csrIsXRet || s5_csrIsTrap, s2_s5_trapTargetFromCsr, s2_robFlushPc)
  when (s6_flushFromRobValid) {
    io.frontend.toFtq.redirect.bits.level := RedirectLevel.flush
    io.frontend.toFtq.redirect.bits.cfiUpdate.target := RegEnable(flushTarget, s5_flushFromRobValidAhead)
  }

  // vtype commit
  decode.io.commitVType.bits := io.fromDataPath.vtype
  decode.io.commitVType.valid := RegNext(rob.io.isVsetFlushPipe)

  io.toDataPath.vtypeAddr := rob.io.vconfigPdest

  decode.io.walkVType := rob.io.toDecode.vtype

  decode.io.redirect := s1_s3_redirect.valid || s2_s4_pendingRedirectValid

  decode.io.in.zip(io.frontend.cfVec).foreach { case (decodeIn, frontendCf) =>
    decodeIn.valid := frontendCf.valid
    frontendCf.ready := decodeIn.ready
    decodeIn.bits.connectCtrlFlow(frontendCf.bits)
  }
  decode.io.csrCtrl := RegNext(io.csrCtrl)
  decode.io.intRat <> rat.io.intReadPorts
  decode.io.fpRat <> rat.io.fpReadPorts
  decode.io.vecRat <> rat.io.vecReadPorts
  decode.io.fusion := 0.U.asTypeOf(decode.io.fusion) // Todo
  decode.io.stallReason.in <> io.frontend.stallReason

  // snapshot check
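  // Rename snapshots capture the robIdx of each renamed group (and whether each slot is a
  // control-flow instruction). On a redirect, useSnpt/snptSelect pick the newest valid
  // snapshot not younger than the redirect so the RAT, ROB and rename can recover from it;
  // flushVec releases snapshots whose contents are entirely flushed by the redirect.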
  class CFIRobIdx extends Bundle {
    val robIdx = Vec(RenameWidth, new RobPtr)
    val isCFI = Vec(RenameWidth, Bool())
  }
  val genSnapshot = Cat(rename.io.out.map(out => out.fire && out.bits.snapshot)).orR
  val snpt = Module(new SnapshotGenerator(0.U.asTypeOf(new CFIRobIdx)))
  snpt.io.enq := genSnapshot
  snpt.io.enqData.robIdx := rename.io.out.map(_.bits.robIdx)
  snpt.io.enqData.isCFI := rename.io.out.map(_.bits.snapshot)
  snpt.io.deq := snpt.io.valids(snpt.io.deqPtr.value) && rob.io.commits.isCommit &&
    Cat(rob.io.commits.commitValid.zip(rob.io.commits.robIdx).map(x => x._1 && x._2 === snpt.io.snapshots(snpt.io.deqPtr.value).robIdx.head)).orR
  snpt.io.redirect := s1_s3_redirect.valid
  val flushVec = VecInit(snpt.io.snapshots.map { snapshot =>
    val notCFIMask = snapshot.isCFI.map(~_)
    val shouldFlush = snapshot.robIdx.map(robIdx => robIdx >= s1_s3_redirect.bits.robIdx || robIdx.value === s1_s3_redirect.bits.robIdx.value)
    val shouldFlushMask = (1 to RenameWidth).map(shouldFlush take _ reduce (_ || _))
    s1_s3_redirect.valid && Cat(shouldFlushMask.zip(notCFIMask).map(x => x._1 | x._2)).andR
  })
  val flushVecNext = RegNext(flushVec, 0.U.asTypeOf(flushVec))
  snpt.io.flushVec := flushVecNext

  val useSnpt = VecInit.tabulate(RenameSnapshotNum)(idx =>
    snpt.io.valids(idx) && s1_s3_redirect.bits.robIdx >= snpt.io.snapshots(idx).robIdx.head
  ).reduceTree(_ || _)
  val snptSelect = MuxCase(
    0.U(log2Ceil(RenameSnapshotNum).W),
    (1 to RenameSnapshotNum).map(i => (snpt.io.enqPtr - i.U).value).map(idx =>
      (snpt.io.valids(idx) && s1_s3_redirect.bits.robIdx >= snpt.io.snapshots(idx).robIdx.head, idx)
    )
  )

  rob.io.snpt.snptEnq := DontCare
  rob.io.snpt.snptDeq := snpt.io.deq
  rob.io.snpt.useSnpt := useSnpt
  rob.io.snpt.snptSelect := snptSelect
  rob.io.snpt.flushVec := flushVecNext
  rat.io.snpt.snptEnq := genSnapshot
  rat.io.snpt.snptDeq := snpt.io.deq
  rat.io.snpt.useSnpt := useSnpt
  rat.io.snpt.snptSelect := snptSelect
  rat.io.snpt.flushVec := flushVec

  val decodeHasException = decode.io.out.map(x => x.bits.exceptionVec(instrPageFault) || x.bits.exceptionVec(instrAccessFault))
  // fusion decoder
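  // Fusion is only attempted when the candidate instruction carries no instruction-fetch
  // exception and fusion is not disabled (single-step mode or csrCtrl.fusion_enable == 0).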
  for (i <- 0 until DecodeWidth) {
    fusionDecoder.io.in(i).valid := decode.io.out(i).valid && !(decodeHasException(i) || disableFusion)
    fusionDecoder.io.in(i).bits := decode.io.out(i).bits.instr
    if (i > 0) {
      fusionDecoder.io.inReady(i - 1) := decode.io.out(i).ready
    }
  }

  private val decodePipeRename = Wire(Vec(RenameWidth, DecoupledIO(new DecodedInst)))

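  // decode -> rename pipeline registers: flushed on a redirect (or while a redirect is still
  // pending) and gated by rename readiness; a slot cleared by the fusion decoder is not
  // presented to rename.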
  for (i <- 0 until RenameWidth) {
    PipelineConnect(decode.io.out(i), decodePipeRename(i), rename.io.in(i).ready,
      s1_s3_redirect.valid || s2_s4_pendingRedirectValid, moduleName = Some("decodePipeRenameModule"))

    decodePipeRename(i).ready := rename.io.in(i).ready
    rename.io.in(i).valid := decodePipeRename(i).valid && !fusionDecoder.io.clear(i)
    rename.io.in(i).bits := decodePipeRename(i).bits
  }

  for (i <- 0 until RenameWidth - 1) {
    fusionDecoder.io.dec(i) := decodePipeRename(i).bits
    rename.io.fusionInfo(i) := fusionDecoder.io.info(i)

    // update the first RenameWidth - 1 instructions
    decode.io.fusion(i) := fusionDecoder.io.out(i).valid && rename.io.out(i).fire
    when (fusionDecoder.io.out(i).valid) {
      fusionDecoder.io.out(i).bits.update(rename.io.in(i).bits)
      // TODO: remove this dirty code for ftq update
      val sameFtqPtr = rename.io.in(i).bits.ftqPtr.value === rename.io.in(i + 1).bits.ftqPtr.value
      val ftqOffset0 = rename.io.in(i).bits.ftqOffset
      val ftqOffset1 = rename.io.in(i + 1).bits.ftqOffset
      val ftqOffsetDiff = ftqOffset1 - ftqOffset0
      val cond1 = sameFtqPtr && ftqOffsetDiff === 1.U
      val cond2 = sameFtqPtr && ftqOffsetDiff === 2.U
      val cond3 = !sameFtqPtr && ftqOffset1 === 0.U
      val cond4 = !sameFtqPtr && ftqOffset1 === 1.U
      rename.io.in(i).bits.commitType := Mux(cond1, 4.U, Mux(cond2, 5.U, Mux(cond3, 6.U, 7.U)))
      XSError(!cond1 && !cond2 && !cond3 && !cond4, p"new condition $sameFtqPtr $ftqOffset0 $ftqOffset1\n")
    }

  }

  // memory dependence prediction
  // at decode, send the folded pc to the MDP
  private val mdpFlodPcVec = Wire(Vec(DecodeWidth, UInt(MemPredPCWidth.W)))
  for (i <- 0 until DecodeWidth) {
    mdpFlodPcVec(i) := Mux(
      decode.io.out(i).fire,
      decode.io.in(i).bits.foldpc,
      rename.io.in(i).bits.foldpc
    )
  }

  // currently, we only update mdp info when isReplay
  memCtrl.io.redirect := s1_s3_redirect
  memCtrl.io.csrCtrl := io.csrCtrl                          // RegNext in memCtrl
  memCtrl.io.stIn := io.fromMem.stIn                        // RegNext in memCtrl
  memCtrl.io.memPredUpdate := redirectGen.io.memPredUpdate  // RegNext in memCtrl
  memCtrl.io.mdpFlodPcVec := mdpFlodPcVec
  memCtrl.io.dispatchLFSTio <> dispatch.io.lfst

  rat.io.redirect := s1_s3_redirect.valid
  rat.io.rabCommits := rob.io.rabCommits
  rat.io.diffCommits.foreach(_ := rob.io.diffCommits.get)
  rat.io.intRenamePorts := rename.io.intRenamePorts
  rat.io.fpRenamePorts := rename.io.fpRenamePorts
  rat.io.vecRenamePorts := rename.io.vecRenamePorts

  rename.io.redirect := s1_s3_redirect
  rename.io.rabCommits := rob.io.rabCommits
  rename.io.waittable := (memCtrl.io.waitTable2Rename zip decode.io.out).map{ case(waittable2rename, decodeOut) =>
    RegEnable(waittable2rename, decodeOut.fire)
  }
  rename.io.ssit := memCtrl.io.ssit2Rename
  rename.io.intReadPorts := VecInit(rat.io.intReadPorts.map(x => VecInit(x.map(_.data))))
  rename.io.fpReadPorts := VecInit(rat.io.fpReadPorts.map(x => VecInit(x.map(_.data))))
  rename.io.vecReadPorts := VecInit(rat.io.vecReadPorts.map(x => VecInit(x.map(_.data))))
  rename.io.int_need_free := rat.io.int_need_free
  rename.io.int_old_pdest := rat.io.int_old_pdest
  rename.io.fp_old_pdest := rat.io.fp_old_pdest
  rename.io.vec_old_pdest := rat.io.vec_old_pdest
  rename.io.debug_int_rat.foreach(_ := rat.io.debug_int_rat.get)
  rename.io.debug_fp_rat.foreach(_ := rat.io.debug_fp_rat.get)
  rename.io.debug_vec_rat.foreach(_ := rat.io.debug_vec_rat.get)
  rename.io.debug_vconfig_rat.foreach(_ := rat.io.debug_vconfig_rat.get)
  rename.io.stallReason.in <> decode.io.stallReason.out
  rename.io.snpt.snptEnq := DontCare
  rename.io.snpt.snptDeq := snpt.io.deq
  rename.io.snpt.useSnpt := useSnpt
  rename.io.snpt.snptSelect := snptSelect
  rename.io.robIsEmpty := rob.io.enq.isEmpty
  rename.io.snpt.flushVec := flushVecNext
  rename.io.snptLastEnq.valid := !isEmpty(snpt.io.enqPtr, snpt.io.deqPtr)
  rename.io.snptLastEnq.bits := snpt.io.snapshots((snpt.io.enqPtr - 1.U).value).robIdx.head

  val renameOut = Wire(chiselTypeOf(rename.io.out))
  renameOut <> rename.io.out
  dispatch.io.fromRename <> renameOut
  renameOut.zip(dispatch.io.recv).foreach { case (rename, recv) => rename.ready := recv }
  dispatch.io.fromRenameIsFp := rename.io.toDispatchIsFp
  dispatch.io.fromRenameIsInt := rename.io.toDispatchIsInt
  dispatch.io.hartId := io.fromTop.hartId
  dispatch.io.redirect := s1_s3_redirect
  dispatch.io.enqRob <> rob.io.enq
  dispatch.io.robHead := rob.io.debugRobHead
  dispatch.io.stallReason <> rename.io.stallReason.out
  dispatch.io.lqCanAccept := io.lqCanAccept
  dispatch.io.sqCanAccept := io.sqCanAccept
  dispatch.io.robHeadNotReady := rob.io.headNotReady
  dispatch.io.robFull := rob.io.robFull
  dispatch.io.singleStep := RegNext(io.csrCtrl.singlestep)

  intDq.io.enq <> dispatch.io.toIntDq
  intDq.io.redirect <> s2_s4_redirect

  fpDq.io.enq <> dispatch.io.toFpDq
  fpDq.io.redirect <> s2_s4_redirect

  lsDq.io.enq <> dispatch.io.toLsDq
  lsDq.io.redirect <> s2_s4_redirect

  io.toIssueBlock.intUops <> intDq.io.deq
  io.toIssueBlock.vfUops  <> fpDq.io.deq
  io.toIssueBlock.memUops <> lsDq.io.deq
  io.toIssueBlock.allocPregs <> dispatch.io.allocPregs
  io.toIssueBlock.flush   <> s2_s4_redirect

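  // pcMem is written by the frontend FTQ; the write is registered here for one cycle.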
  pcMem.io.wen.head   := RegNext(io.frontend.fromFtq.pc_mem_wen)
  pcMem.io.waddr.head := RegEnable(io.frontend.fromFtq.pc_mem_waddr, io.frontend.fromFtq.pc_mem_wen)
  pcMem.io.wdata.head := RegEnable(io.frontend.fromFtq.pc_mem_wdata, io.frontend.fromFtq.pc_mem_wen)

  private val jumpPcVec         : Vec[UInt] = Wire(Vec(params.numPcReadPort, UInt(VAddrData().dataWidth.W)))
  io.toIssueBlock.pcVec := jumpPcVec

  io.toDataPath.flush := s2_s4_redirect
  io.toExuBlock.flush := s2_s4_redirect

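  // Read the PCs of the entries about to leave the integer dispatch queue, so each PC is
  // available to the issue block alongside the dequeued uops.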
  for ((pcMemIdx, i) <- pcMemRdIndexes("exu").zipWithIndex) {
    pcMem.io.raddr(pcMemIdx) := intDq.io.deqNext(i).ftqPtr.value
    jumpPcVec(i) := pcMem.io.rdata(pcMemIdx).getPc(RegNext(intDq.io.deqNext(i).ftqOffset))
  }

  val dqOuts = Seq(io.toIssueBlock.intUops) ++ Seq(io.toIssueBlock.vfUops) ++ Seq(io.toIssueBlock.memUops)
  dqOuts.zipWithIndex.foreach { case (dqOut, dqIdx) =>
    dqOut.map(_.bits.pc).zipWithIndex.foreach{ case (pc, portIdx) =>
      if(params.allSchdParams(dqIdx).numPcReadPort > 0){
        val realJumpPcVec = jumpPcVec.drop(params.allSchdParams.take(dqIdx).map(_.numPcReadPort).sum).take(params.allSchdParams(dqIdx).numPcReadPort)
        pc := realJumpPcVec(portIdx)
      }
    }
  }

  rob.io.hartId := io.fromTop.hartId
  rob.io.redirect := s1_s3_redirect
  rob.io.writeback := delayedNotFlushedWriteBack
  rob.io.writebackNums := VecInit(delayedNotFlushedWriteBackNums)

  io.redirect := s1_s3_redirect

  // rob to int block
  io.robio.csr <> rob.io.csr
  // When wfi is disabled, it will not block ROB commit.
  rob.io.csr.wfiEvent := io.robio.csr.wfiEvent
  rob.io.wfi_enable := decode.io.csrCtrl.wfi_enable

  io.toTop.cpuHalt := DelayN(rob.io.cpu_halt, 5)

  io.robio.csr.perfinfo.retiredInstr <> RegNext(rob.io.csr.perfinfo.retiredInstr)
  io.robio.exception := rob.io.exception
  io.robio.exception.bits.pc := s1_robFlushPc

  // rob to mem block
  io.robio.lsq <> rob.io.lsq

  io.debug_int_rat    .foreach(_ := rat.io.diff_int_rat.get)
  io.debug_fp_rat     .foreach(_ := rat.io.diff_fp_rat.get)
  io.debug_vec_rat    .foreach(_ := rat.io.diff_vec_rat.get)
  io.debug_vconfig_rat.foreach(_ := rat.io.diff_vconfig_rat.get)

  rob.io.debug_ls := io.robio.debug_ls
  rob.io.debugHeadLsIssue := io.robio.robHeadLsIssue
  rob.io.lsTopdownInfo := io.robio.lsTopdownInfo
  rob.io.debugEnqLsq := io.debugEnqLsq

  io.robio.robDeqPtr := rob.io.robDeqPtr

  io.debugTopDown.fromRob := rob.io.debugTopDown.toCore
  dispatch.io.debugTopDown.fromRob := rob.io.debugTopDown.toDispatch
  dispatch.io.debugTopDown.fromCore := io.debugTopDown.fromCore
  io.debugRolling := rob.io.debugRolling

  io.perfInfo.ctrlInfo.robFull := RegNext(rob.io.robFull)
  io.perfInfo.ctrlInfo.intdqFull := RegNext(intDq.io.dqFull)
  io.perfInfo.ctrlInfo.fpdqFull := RegNext(fpDq.io.dqFull)
  io.perfInfo.ctrlInfo.lsdqFull := RegNext(lsDq.io.dqFull)

  val pfevent = Module(new PFEvent)
  pfevent.io.distribute_csr := RegNext(io.csrCtrl.distribute_csr)
  val csrevents = pfevent.io.hpmevent.slice(8,16)

  val perfinfo = IO(new Bundle(){
    val perfEventsRs      = Input(Vec(params.IqCnt, new PerfEvent))
    val perfEventsEu0     = Input(Vec(6, new PerfEvent))
    val perfEventsEu1     = Input(Vec(6, new PerfEvent))
  })

  val perfFromUnits = Seq(decode, rename, dispatch, intDq, fpDq, lsDq, rob).flatMap(_.getPerfEvents)
  val perfFromIO    = perfinfo.perfEventsEu0.map(x => ("perfEventsEu0", x.value)) ++
                        perfinfo.perfEventsEu1.map(x => ("perfEventsEu1", x.value)) ++
                        perfinfo.perfEventsRs.map(x => ("perfEventsRs", x.value))
  val perfBlock     = Seq()
  // let index = 0 be no event
  val allPerfEvents = Seq(("noEvent", 0.U)) ++ perfFromUnits ++ perfFromIO ++ perfBlock

  if (printEventCoding) {
    for (((name, inc), i) <- allPerfEvents.zipWithIndex) {
      println("CtrlBlock perfEvents Set", name, inc, i)
    }
  }

  val allPerfInc = allPerfEvents.map(_._2.asTypeOf(new PerfEvent))
  val perfEvents = HPerfMonitor(csrevents, allPerfInc).getPerfEvents
  generatePerfEvent()
}

class CtrlBlockIO()(implicit p: Parameters, params: BackendParams) extends XSBundle {
  val fromTop = new Bundle {
    val hartId = Input(UInt(8.W))
  }
  val toTop = new Bundle {
    val cpuHalt = Output(Bool())
  }
  val frontend = Flipped(new FrontendToCtrlIO())
  val toIssueBlock = new Bundle {
    val flush = ValidIO(new Redirect)
    val allocPregs = Vec(RenameWidth, Output(new ResetPregStateReq))
    val intUops = Vec(dpParams.IntDqDeqWidth, DecoupledIO(new DynInst))
    val vfUops = Vec(dpParams.FpDqDeqWidth, DecoupledIO(new DynInst))
    val memUops = Vec(dpParams.LsDqDeqWidth, DecoupledIO(new DynInst))
    val pcVec = Output(Vec(params.numPcReadPort, UInt(VAddrData().dataWidth.W)))
  }
  val fromDataPath = new Bundle{
    val vtype = Input(new VType)
  }
  val toDataPath = new Bundle {
    val vtypeAddr = Output(UInt(PhyRegIdxWidth.W))
    val flush = ValidIO(new Redirect)
  }
  val toExuBlock = new Bundle {
    val flush = ValidIO(new Redirect)
  }
  val fromWB = new Bundle {
    val wbData = Flipped(MixedVec(params.genWrite2CtrlBundles))
  }
  val redirect = ValidIO(new Redirect)
  val fromMem = new Bundle {
    val stIn = Vec(params.StaExuCnt, Flipped(ValidIO(new DynInst))) // use storeSetHit, ssid, robIdx
    val violation = Flipped(ValidIO(new Redirect))
  }
  val memLdPcRead = Vec(params.LduCnt, Flipped(new FtqRead(UInt(VAddrBits.W))))
  val memStPcRead = Vec(params.StaCnt, Flipped(new FtqRead(UInt(VAddrBits.W))))
  val memHyPcRead = Vec(params.HyuCnt, Flipped(new FtqRead(UInt(VAddrBits.W))))

  val csrCtrl = Input(new CustomCSRCtrlIO)
  val robio = new Bundle {
    val csr = new RobCSRIO
    val exception = ValidIO(new ExceptionInfo)
    val lsq = new RobLsqIO
    val lsTopdownInfo = Vec(params.LduCnt + params.HyuCnt, Input(new LsTopdownInfo))
    val debug_ls = Input(new DebugLSIO())
    val robHeadLsIssue = Input(Bool())
    val robDeqPtr = Output(new RobPtr)
  }

  val perfInfo = Output(new Bundle{
    val ctrlInfo = new Bundle {
      val robFull   = Bool()
      val intdqFull = Bool()
      val fpdqFull  = Bool()
      val lsdqFull  = Bool()
    }
  })
  val debug_int_rat     = if (params.debugEn) Some(Vec(32, Output(UInt(PhyRegIdxWidth.W)))) else None
  val debug_fp_rat      = if (params.debugEn) Some(Vec(32, Output(UInt(PhyRegIdxWidth.W)))) else None
  val debug_vec_rat     = if (params.debugEn) Some(Vec(32, Output(UInt(PhyRegIdxWidth.W)))) else None
  val debug_vconfig_rat = if (params.debugEn) Some(Output(UInt(PhyRegIdxWidth.W))) else None // TODO: use me

  val sqCanAccept = Input(Bool())
  val lqCanAccept = Input(Bool())

  val debugTopDown = new Bundle {
    val fromRob = new RobCoreTopDownIO
    val fromCore = new CoreDispatchTopDownIO
  }
  val debugRolling = new RobDebugRollingIO
  val debugEnqLsq = Input(new LsqEnqIO)
}

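// Assigns a contiguous range of integer indices to each named group;
// apply(name) returns the indices reserved for that group.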
class NamedIndexes(namedCnt: Seq[(String, Int)]) {
  require(namedCnt.map(_._1).distinct.size == namedCnt.size, "namedCnt should not contain duplicate names")

  val maxIdx = namedCnt.map(_._2).sum
  val nameRangeMap: Map[String, (Int, Int)] = namedCnt.indices.map { i =>
    val begin = namedCnt.slice(0, i).map(_._2).sum
    val end = begin + namedCnt(i)._2
    (namedCnt(i)._1, (begin, end))
  }.toMap

  def apply(name: String): Seq[Int] = {
    require(nameRangeMap.contains(name))
    nameRangeMap(name)._1 until nameRangeMap(name)._2
  }
}