xref: /XiangShan/src/main/scala/xiangshan/backend/CtrlBlock.scala (revision c7d010e5062a987c3e027d867e1b2c36061ec895)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.backend

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
import utility._
import utils._
import xiangshan.ExceptionNO._
import xiangshan._
import xiangshan.backend.Bundles.{DecodedInst, DynInst, ExceptionInfo, ExuOutput}
import xiangshan.backend.ctrlblock.{DebugLSIO, DebugLsInfoBundle, LsTopdownInfo, MemCtrl, RedirectGenerator}
import xiangshan.backend.datapath.DataConfig.VAddrData
import xiangshan.backend.decode.{DecodeStage, FusionDecoder}
import xiangshan.backend.dispatch.{CoreDispatchTopDownIO, Dispatch, DispatchQueue}
import xiangshan.backend.fu.PFEvent
import xiangshan.backend.fu.vector.Bundles.VType
import xiangshan.backend.rename.{Rename, RenameTableWrapper, SnapshotGenerator}
import xiangshan.backend.rob.{Rob, RobCSRIO, RobCoreTopDownIO, RobDebugRollingIO, RobLsqIO, RobPtr}
import xiangshan.frontend.{FtqRead, Ftq_RF_Components}

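// Ctrl-block-to-FTQ interface: committed instructions, the final redirect, and
// ftqIdx hints sent ahead of the corresponding redirect together with a one-hot select.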
class CtrlToFtqIO(implicit p: Parameters) extends XSBundle {
  val rob_commits = Vec(CommitWidth, Valid(new RobCommitInfo))
  val redirect = Valid(new Redirect)
  val ftqIdxAhead = Vec(BackendRedirectNum, Valid(new FtqPtr))
  val ftqIdxSelOH = Valid(UInt((BackendRedirectNum).W))
}

class CtrlBlock(params: BackendParams)(implicit p: Parameters) extends LazyModule {
  val rob = LazyModule(new Rob(params))

  lazy val module = new CtrlBlockImp(this)(p, params)

}

class CtrlBlockImp(
  override val wrapper: CtrlBlock
)(implicit
  p: Parameters,
  params: BackendParams
) extends LazyModuleImp(wrapper)
  with HasXSParameter
  with HasCircularQueuePtrHelper
  with HasPerfEvents
{
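  // Named allocation of read ports on the backend PC memory (pcMem); each consumer below
  // indexes pcMem through its own range (see the NamedIndexes helper at the end of this file).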
  val pcMemRdIndexes = new NamedIndexes(Seq(
    "exu"       -> params.numPcReadPort,
    "redirect"  -> 1,
    "memPred"   -> 1,
    "robFlush"  -> 1,
    "load"      -> params.LduCnt,
    "store"     -> (if(EnableStorePrefetchSMS) params.StaCnt else 0)
  ))

  private val numPcMemReadForExu = params.numPcReadPort
  private val numPcMemRead = pcMemRdIndexes.maxIdx

  println(s"pcMem read num: $numPcMemRead")
  println(s"pcMem read num for exu: $numPcMemReadForExu")

  val io = IO(new CtrlBlockIO())

  val decode = Module(new DecodeStage)
  val fusionDecoder = Module(new FusionDecoder)
  val rat = Module(new RenameTableWrapper)
  val rename = Module(new Rename)
  val dispatch = Module(new Dispatch)
  val intDq = Module(new DispatchQueue(dpParams.IntDqSize, RenameWidth, dpParams.IntDqDeqWidth))
  val fpDq = Module(new DispatchQueue(dpParams.FpDqSize, RenameWidth, dpParams.FpDqDeqWidth))
  val lsDq = Module(new DispatchQueue(dpParams.LsDqSize, RenameWidth, dpParams.LsDqDeqWidth))
  val redirectGen = Module(new RedirectGenerator)
  private val pcMem = Module(new SyncDataModuleTemplate(new Ftq_RF_Components, FtqSize, numPcMemRead, 1, "BackendPC"))
  private val rob = wrapper.rob.module
  private val memCtrl = Module(new MemCtrl(params))

  private val disableFusion = decode.io.csrCtrl.singlestep || !decode.io.csrCtrl.fusion_enable

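  // The ROB flush (s0) is staged one cycle to s1; the bits are captured with RegEnable,
  // so they only update when the flush is actually valid.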
  private val s0_robFlushRedirect = rob.io.flushOut
  private val s1_robFlushRedirect = Wire(Valid(new Redirect))
  s1_robFlushRedirect.valid := RegNext(s0_robFlushRedirect.valid)
  s1_robFlushRedirect.bits := RegEnable(s0_robFlushRedirect.bits, s0_robFlushRedirect.valid)

  pcMem.io.raddr(pcMemRdIndexes("robFlush").head) := s0_robFlushRedirect.bits.ftqIdx.value
  private val s1_robFlushPc = pcMem.io.rdata(pcMemRdIndexes("robFlush").head).getPc(RegNext(s0_robFlushRedirect.bits.ftqOffset))
  private val s3_redirectGen = redirectGen.io.stage2Redirect
  private val s1_s3_redirect = Mux(s1_robFlushRedirect.valid, s1_robFlushRedirect, s3_redirectGen)
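  // Remember that a redirect has been raised but not yet sent to the frontend;
  // the decode-to-rename pipeline below keeps flushing while this flag is set.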
  private val s2_s4_pendingRedirectValid = RegInit(false.B)
  when (s1_s3_redirect.valid) {
    s2_s4_pendingRedirectValid := true.B
  }.elsewhen (RegNext(io.frontend.toFtq.redirect.valid)) {
    s2_s4_pendingRedirectValid := false.B
  }

  // The redirect is registered (RegNext) again inside the ExuBlocks and IssueBlocks,
  // hence the extra delayed copies kept here.
  val s2_s4_redirect = RegNextWithEnable(s1_s3_redirect)
  val s3_s5_redirect = RegNextWithEnable(s2_s4_redirect)

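  // Register every writeback for one cycle and drop those killed by any of the three
  // in-flight redirect stages, so squashed writebacks are not presented to the ROB.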
  private val delayedNotFlushedWriteBack = io.fromWB.wbData.map(x => {
    val valid = x.valid
    val killedByOlder = x.bits.robIdx.needFlush(Seq(s1_s3_redirect, s2_s4_redirect, s3_s5_redirect))
    val delayed = Wire(Valid(new ExuOutput(x.bits.params)))
    delayed.valid := RegNext(valid && !killedByOlder)
    delayed.bits := RegEnable(x.bits, x.valid)
    delayed.bits.debugInfo.writebackTime := GTimer()
    delayed
  }).toSeq

  private val exuPredecode = VecInit(
    delayedNotFlushedWriteBack.filter(_.bits.redirect.nonEmpty).map(x => x.bits.predecodeInfo.get).toSeq
  )

  private val exuRedirects: Seq[ValidIO[Redirect]] = delayedNotFlushedWriteBack.filter(_.bits.redirect.nonEmpty).map(x => {
    val out = Wire(Valid(new Redirect()))
    out.valid := x.valid && x.bits.redirect.get.valid && x.bits.redirect.get.bits.cfiUpdate.isMisPred
    out.bits := x.bits.redirect.get.bits
    out.bits.debugIsCtrl := true.B
    out.bits.debugIsMemVio := false.B
    out
  }).toSeq

  private val memViolation = io.fromMem.violation
  val loadReplay = Wire(ValidIO(new Redirect))
  loadReplay.valid := RegNext(memViolation.valid &&
    !memViolation.bits.robIdx.needFlush(Seq(s1_s3_redirect, s2_s4_redirect))
  )
  loadReplay.bits := RegEnable(memViolation.bits, memViolation.valid)
  loadReplay.bits.debugIsCtrl := false.B
  loadReplay.bits.debugIsMemVio := true.B

  val pdestReverse = rob.io.commits.info.map(info => info.pdest).reverse

  pcMem.io.raddr(pcMemRdIndexes("redirect").head) := redirectGen.io.redirectPcRead.ptr.value
  redirectGen.io.redirectPcRead.data := pcMem.io.rdata(pcMemRdIndexes("redirect").head).getPc(RegNext(redirectGen.io.redirectPcRead.offset))
  pcMem.io.raddr(pcMemRdIndexes("memPred").head) := redirectGen.io.memPredPcRead.ptr.value
  redirectGen.io.memPredPcRead.data := pcMem.io.rdata(pcMemRdIndexes("memPred").head).getPc(RegNext(redirectGen.io.memPredPcRead.offset))

  for ((pcMemIdx, i) <- pcMemRdIndexes("load").zipWithIndex) {
    pcMem.io.raddr(pcMemIdx) := io.memLdPcRead(i).ptr.value
    io.memLdPcRead(i).data := pcMem.io.rdata(pcMemIdx).getPc(RegNext(io.memLdPcRead(i).offset))
  }

  if (EnableStorePrefetchSMS) {
    for ((pcMemIdx, i) <- pcMemRdIndexes("store").zipWithIndex) {
      pcMem.io.raddr(pcMemIdx) := io.memStPcRead(i).ptr.value
      io.memStPcRead(i).data := pcMem.io.rdata(pcMemIdx).getPc(RegNext(io.memStPcRead(i).offset))
    }
  } else {
    io.memStPcRead.foreach(_.data := 0.U)
  }

  redirectGen.io.hartId := io.fromTop.hartId
  redirectGen.io.exuRedirect := exuRedirects.toSeq
  redirectGen.io.exuOutPredecode := exuPredecode // guarded by exuRedirect.valid
  redirectGen.io.loadReplay <> loadReplay

  redirectGen.io.robFlush := s1_robFlushRedirect.valid

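  // The frontend flush derived from a ROB flush is delayed: valid goes through DelayN(5)
  // plus a final RegNext before being sent to the FTQ (see the T0..T6 timeline below).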
  val frontendFlushValidAhead = DelayN(s1_robFlushRedirect.valid, 5)
  val s6_frontendFlushValid = RegNext(frontendFlushValidAhead)
  val frontendFlushBits = RegEnable(s1_robFlushRedirect.bits, s1_robFlushRedirect.valid) // ??
  // When the ROB commits an instruction that also flushes, we notify the frontend of the flush
  // but not of the commit. Flushes to the frontend may be delayed by several cycles, and a
  // commit arriving before its flush would cause errors. Thus, all flush reasons behave the
  // same as exceptions from the frontend's point of view.
  for (i <- 0 until CommitWidth) {
    // why flushOut: instructions with flushPipe are not committed to the frontend.
    // If we committed them, it would cause a flush after commit, which the frontend cannot accept.
    val s1_isCommit = rob.io.commits.commitValid(i) && rob.io.commits.isCommit && !s0_robFlushRedirect.valid
    io.frontend.toFtq.rob_commits(i).valid := RegNext(s1_isCommit)
    io.frontend.toFtq.rob_commits(i).bits := RegEnable(rob.io.commits.info(i), s1_isCommit)
  }
  io.frontend.toFtq.redirect.valid := s6_frontendFlushValid || s3_redirectGen.valid
  io.frontend.toFtq.redirect.bits := Mux(s6_frontendFlushValid, frontendFlushBits, s3_redirectGen.bits)
  io.frontend.toFtq.ftqIdxSelOH.valid := s6_frontendFlushValid || redirectGen.io.stage2Redirect.valid
  io.frontend.toFtq.ftqIdxSelOH.bits := Cat(s6_frontendFlushValid, redirectGen.io.stage2oldestOH & Fill(NumRedirect + 1, !s6_frontendFlushValid))

  // jmp/brh
  for (i <- 0 until NumRedirect) {
    io.frontend.toFtq.ftqIdxAhead(i).valid := exuRedirects(i).valid && exuRedirects(i).bits.cfiUpdate.isMisPred && !s1_robFlushRedirect.valid && !frontendFlushValidAhead
    io.frontend.toFtq.ftqIdxAhead(i).bits := exuRedirects(i).bits.ftqIdx
  }
  // loadreplay
  io.frontend.toFtq.ftqIdxAhead(NumRedirect).valid := loadReplay.valid && !s1_robFlushRedirect.valid && !frontendFlushValidAhead
  io.frontend.toFtq.ftqIdxAhead(NumRedirect).bits := loadReplay.bits.ftqIdx
  // exception
  io.frontend.toFtq.ftqIdxAhead.last.valid := frontendFlushValidAhead
  io.frontend.toFtq.ftqIdxAhead.last.bits := frontendFlushBits.ftqIdx
  // Be careful here:
  // T0: rob.io.flushOut, s0_robFlushRedirect
  // T1: s1_robFlushRedirect, rob.io.exception.valid
  // T2: csr.redirect.valid
  // T3: csr.exception.valid
  // T4: csr.trapTarget
  // T5: ctrlBlock.trapTarget
  // T6: io.frontend.toFtq.stage2Redirect.valid
  val s2_robFlushPc = RegEnable(Mux(s1_robFlushRedirect.bits.flushItself(),
    s1_robFlushPc, // replay inst
    s1_robFlushPc + Mux(s1_robFlushRedirect.bits.isRVC, 2.U, 4.U) // flush pipe
  ), s1_robFlushRedirect.valid)
  private val s2_csrIsXRet = io.robio.csr.isXRet
  private val s5_csrIsTrap = DelayN(rob.io.exception.valid, 4)
  private val s2_s5_trapTargetFromCsr = io.robio.csr.trapTarget

  val flushTarget = Mux(s2_csrIsXRet || s5_csrIsTrap, s2_s5_trapTargetFromCsr, s2_robFlushPc)
  when (s6_frontendFlushValid) {
    io.frontend.toFtq.redirect.bits.level := RedirectLevel.flush
    io.frontend.toFtq.redirect.bits.cfiUpdate.target := RegNext(flushTarget)
  }

  // vtype commit
  decode.io.commitVType.bits := io.fromDataPath.vtype
  decode.io.commitVType.valid := RegNext(rob.io.isVsetFlushPipe)

  io.toDataPath.vtypeAddr := rob.io.vconfigPdest

  // vtype walk
  val isVsetSeq = rob.io.commits.walkValid.zip(rob.io.commits.info).map { case (valid, info) => valid && info.isVset }.reverse
  val walkVTypeReverse = rob.io.commits.info.map(info => info.vtype).reverse
  val walkVType = PriorityMux(isVsetSeq, walkVTypeReverse)

  decode.io.walkVType.bits := walkVType.asTypeOf(new VType)
  decode.io.walkVType.valid := rob.io.commits.isWalk && isVsetSeq.reduce(_ || _)

  decode.io.isRedirect := s1_s3_redirect.valid

  decode.io.in.zip(io.frontend.cfVec).foreach { case (decodeIn, frontendCf) =>
    decodeIn.valid := frontendCf.valid
    frontendCf.ready := decodeIn.ready
    decodeIn.bits.connectCtrlFlow(frontendCf.bits)
  }
  decode.io.csrCtrl := RegNext(io.csrCtrl)
  decode.io.intRat <> rat.io.intReadPorts
  decode.io.fpRat <> rat.io.fpReadPorts
  decode.io.vecRat <> rat.io.vecReadPorts
  decode.io.fusion := 0.U.asTypeOf(decode.io.fusion) // Todo
  decode.io.stallReason.in <> io.frontend.stallReason

  // snapshot check
  val snpt = Module(new SnapshotGenerator(rename.io.out.head.bits.robIdx))
  snpt.io.enq := rename.io.out.head.bits.snapshot && rename.io.out.head.fire
  snpt.io.enqData.head := rename.io.out.head.bits.robIdx
  snpt.io.deq := snpt.io.valids(snpt.io.deqPtr.value) && rob.io.commits.isCommit &&
    Cat(rob.io.commits.commitValid.zip(rob.io.commits.robIdx).map(x => x._1 && x._2 === snpt.io.snapshots(snpt.io.deqPtr.value))).orR
  snpt.io.flush := s1_s3_redirect.valid

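  // useSnpt: at least one valid snapshot was taken at or before the redirected instruction.
  // snptSelect: starting from the most recently enqueued snapshot (enqPtr - 1) and walking
  // backwards, pick the youngest snapshot whose robIdx is not younger than the redirect.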
  val useSnpt = VecInit.tabulate(RenameSnapshotNum)(idx =>
    snpt.io.valids(idx) && s1_s3_redirect.bits.robIdx >= snpt.io.snapshots(idx)
  ).reduceTree(_ || _)
  val snptSelect = MuxCase(
    0.U(log2Ceil(RenameSnapshotNum).W),
    (1 to RenameSnapshotNum).map(i => (snpt.io.enqPtr - i.U).value).map(idx =>
      (snpt.io.valids(idx) && s1_s3_redirect.bits.robIdx >= snpt.io.snapshots(idx), idx)
    )
  )

  rob.io.snpt.snptEnq := DontCare
  rob.io.snpt.snptDeq := snpt.io.deq
  rob.io.snpt.useSnpt := useSnpt
  rob.io.snpt.snptSelect := snptSelect
  rat.io.snpt.snptEnq := rename.io.out.head.bits.snapshot && rename.io.out.head.fire
  rat.io.snpt.snptDeq := snpt.io.deq
  rat.io.snpt.useSnpt := useSnpt
  rat.io.snpt.snptSelect := snptSelect

  val decodeHasException = decode.io.out.map(x => x.bits.exceptionVec(instrPageFault) || x.bits.exceptionVec(instrAccessFault))
  // fusion decoder
  for (i <- 0 until DecodeWidth) {
    fusionDecoder.io.in(i).valid := decode.io.out(i).valid && !(decodeHasException(i) || disableFusion)
    fusionDecoder.io.in(i).bits := decode.io.out(i).bits.instr
    if (i > 0) {
      fusionDecoder.io.inReady(i - 1) := decode.io.out(i).ready
    }
  }

  private val decodePipeRename = Wire(Vec(RenameWidth, DecoupledIO(new DecodedInst)))

  for (i <- 0 until RenameWidth) {
    PipelineConnect(decode.io.out(i), decodePipeRename(i), rename.io.in(i).ready,
      s1_s3_redirect.valid || s2_s4_pendingRedirectValid, moduleName = Some("decodePipeRenameModule"))

    decodePipeRename(i).ready := rename.io.in(i).ready
    rename.io.in(i).valid := decodePipeRename(i).valid && !fusionDecoder.io.clear(i)
    rename.io.in(i).bits := decodePipeRename(i).bits
  }

  for (i <- 0 until RenameWidth - 1) {
    fusionDecoder.io.dec(i) := decodePipeRename(i).bits
    rename.io.fusionInfo(i) := fusionDecoder.io.info(i)

    // update the first RenameWidth - 1 instructions
    decode.io.fusion(i) := fusionDecoder.io.out(i).valid && rename.io.out(i).fire
    when (fusionDecoder.io.out(i).valid) {
      fusionDecoder.io.out(i).bits.update(rename.io.in(i).bits)
      // TODO: remove this dirty code for ftq update
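      // For a fused pair, the commitType field is temporarily reused to encode how the
      // second instruction's ftqOffset relates to the first (same FTQ entry with offset
      // +1 or +2, or the start of the next FTQ entry), so the FTQ can still be updated.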
      val sameFtqPtr = rename.io.in(i).bits.ftqPtr.value === rename.io.in(i + 1).bits.ftqPtr.value
      val ftqOffset0 = rename.io.in(i).bits.ftqOffset
      val ftqOffset1 = rename.io.in(i + 1).bits.ftqOffset
      val ftqOffsetDiff = ftqOffset1 - ftqOffset0
      val cond1 = sameFtqPtr && ftqOffsetDiff === 1.U
      val cond2 = sameFtqPtr && ftqOffsetDiff === 2.U
      val cond3 = !sameFtqPtr && ftqOffset1 === 0.U
      val cond4 = !sameFtqPtr && ftqOffset1 === 1.U
      rename.io.in(i).bits.commitType := Mux(cond1, 4.U, Mux(cond2, 5.U, Mux(cond3, 6.U, 7.U)))
      XSError(!cond1 && !cond2 && !cond3 && !cond4, p"new condition $sameFtqPtr $ftqOffset0 $ftqOffset1\n")
    }

  }

  // memory dependence prediction
  // at decode, send the folded PC to the memory dependence predictor (MDP)
  private val mdpFlodPcVec = Wire(Vec(DecodeWidth, UInt(MemPredPCWidth.W)))
  for (i <- 0 until DecodeWidth) {
    mdpFlodPcVec(i) := Mux(
      decode.io.out(i).fire,
      decode.io.in(i).bits.foldpc,
      rename.io.in(i).bits.foldpc
    )
  }

  // currently, we only update mdp info when isReplay
  memCtrl.io.redirect := s1_s3_redirect
  memCtrl.io.csrCtrl := io.csrCtrl                          // RegNext in memCtrl
  memCtrl.io.stIn := io.fromMem.stIn                        // RegNext in memCtrl
  memCtrl.io.memPredUpdate := redirectGen.io.memPredUpdate  // RegNext in memCtrl
  memCtrl.io.mdpFlodPcVec := mdpFlodPcVec
  memCtrl.io.dispatchLFSTio <> dispatch.io.lfst

  rat.io.redirect := s1_s3_redirect.valid
  rat.io.robCommits := rob.io.rabCommits
  rat.io.diffCommits := rob.io.diffCommits
  rat.io.intRenamePorts := rename.io.intRenamePorts
  rat.io.fpRenamePorts := rename.io.fpRenamePorts
  rat.io.vecRenamePorts := rename.io.vecRenamePorts

  rename.io.redirect := s1_s3_redirect
  rename.io.robCommits <> rob.io.rabCommits
  rename.io.waittable := (memCtrl.io.waitTable2Rename zip decode.io.out).map{ case(waittable2rename, decodeOut) =>
    RegEnable(waittable2rename, decodeOut.fire)
  }
  rename.io.ssit := memCtrl.io.ssit2Rename
  rename.io.intReadPorts := VecInit(rat.io.intReadPorts.map(x => VecInit(x.map(_.data))))
  rename.io.fpReadPorts := VecInit(rat.io.fpReadPorts.map(x => VecInit(x.map(_.data))))
  rename.io.vecReadPorts := VecInit(rat.io.vecReadPorts.map(x => VecInit(x.map(_.data))))
  rename.io.int_need_free := rat.io.int_need_free
  rename.io.int_old_pdest := rat.io.int_old_pdest
  rename.io.fp_old_pdest := rat.io.fp_old_pdest
  rename.io.vec_old_pdest := rat.io.vec_old_pdest
  rename.io.debug_int_rat.foreach(_ := rat.io.debug_int_rat.get)
  rename.io.debug_fp_rat.foreach(_ := rat.io.debug_fp_rat.get)
  rename.io.debug_vec_rat.foreach(_ := rat.io.debug_vec_rat.get)
  rename.io.debug_vconfig_rat.foreach(_ := rat.io.debug_vconfig_rat.get)
  rename.io.stallReason.in <> decode.io.stallReason.out
  rename.io.snpt.snptEnq := DontCare
  rename.io.snpt.snptDeq := snpt.io.deq
  rename.io.snpt.useSnpt := useSnpt
  rename.io.snpt.snptSelect := snptSelect

  // prevent new snapshots from being generated when the snapshot queue is full
  val renameOut = Wire(chiselTypeOf(rename.io.out))
  renameOut <> rename.io.out
  when(isFull(snpt.io.enqPtr, snpt.io.deqPtr)) {
    renameOut.head.bits.snapshot := false.B
  }


  // pipeline between rename and dispatch
  for (i <- 0 until RenameWidth) {
    PipelineConnect(renameOut(i), dispatch.io.fromRename(i), dispatch.io.recv(i), s1_s3_redirect.valid)
  }

  dispatch.io.hartId := io.fromTop.hartId
  dispatch.io.redirect := s1_s3_redirect
  dispatch.io.enqRob <> rob.io.enq
  dispatch.io.robHead := rob.io.debugRobHead
  dispatch.io.stallReason <> rename.io.stallReason.out
  dispatch.io.lqCanAccept := io.lqCanAccept
  dispatch.io.sqCanAccept := io.sqCanAccept
  dispatch.io.robHeadNotReady := rob.io.headNotReady
  dispatch.io.robFull := rob.io.robFull
  dispatch.io.singleStep := RegNext(io.csrCtrl.singlestep)

  intDq.io.enq <> dispatch.io.toIntDq
  intDq.io.redirect <> s2_s4_redirect

  fpDq.io.enq <> dispatch.io.toFpDq
  fpDq.io.redirect <> s2_s4_redirect

  lsDq.io.enq <> dispatch.io.toLsDq
  lsDq.io.redirect <> s2_s4_redirect

  io.toIssueBlock.intUops <> intDq.io.deq
  io.toIssueBlock.vfUops  <> fpDq.io.deq
  io.toIssueBlock.memUops <> lsDq.io.deq
  io.toIssueBlock.allocPregs <> dispatch.io.allocPregs
  io.toIssueBlock.flush   <> s2_s4_redirect

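  // pcMem write port: mirror the FTQ's PC entries into the backend PC memory, one cycle delayed.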
  pcMem.io.wen.head   := RegNext(io.frontend.fromFtq.pc_mem_wen)
  pcMem.io.waddr.head := RegNext(io.frontend.fromFtq.pc_mem_waddr)
  pcMem.io.wdata.head := RegNext(io.frontend.fromFtq.pc_mem_wdata)

  private val jumpPcVec         : Vec[UInt] = Wire(Vec(params.numPcReadPort, UInt(VAddrData().dataWidth.W)))
  io.toIssueBlock.pcVec := jumpPcVec

  io.toDataPath.flush := s2_s4_redirect
  io.toExuBlock.flush := s2_s4_redirect

  for ((pcMemIdx, i) <- pcMemRdIndexes("exu").zipWithIndex) {
    pcMem.io.raddr(pcMemIdx) := intDq.io.deqNext(i).ftqPtr.value
    jumpPcVec(i) := pcMem.io.rdata(pcMemIdx).getPc(RegNext(intDq.io.deqNext(i).ftqOffset))
  }

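  // For dispatch-queue outputs whose scheduler has PC read ports, overwrite the uop PC
  // with the value just read from pcMem; other outputs keep the PC from the dispatch queue.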
  val dqOuts = Seq(io.toIssueBlock.intUops) ++ Seq(io.toIssueBlock.vfUops) ++ Seq(io.toIssueBlock.memUops)
  dqOuts.zipWithIndex.foreach { case (dqOut, dqIdx) =>
    dqOut.map(_.bits.pc).zipWithIndex.map{ case (pc, portIdx) =>
      if(params.allSchdParams(dqIdx).numPcReadPort > 0){
        val realJumpPcVec = jumpPcVec.drop(params.allSchdParams.take(dqIdx).map(_.numPcReadPort).sum).take(params.allSchdParams(dqIdx).numPcReadPort)
        pc := realJumpPcVec(portIdx)
      }
    }
  }

  rob.io.hartId := io.fromTop.hartId
  rob.io.redirect := s1_s3_redirect
  rob.io.writeback := delayedNotFlushedWriteBack

  io.redirect := s1_s3_redirect

  // rob to int block
  io.robio.csr <> rob.io.csr
  // When wfi is disabled, it will not block ROB commit.
  rob.io.csr.wfiEvent := io.robio.csr.wfiEvent
  rob.io.wfi_enable := decode.io.csrCtrl.wfi_enable

  io.toTop.cpuHalt := DelayN(rob.io.cpu_halt, 5)

  io.robio.csr.perfinfo.retiredInstr <> RegNext(rob.io.csr.perfinfo.retiredInstr)
  io.robio.exception := rob.io.exception
  io.robio.exception.bits.pc := s1_robFlushPc

  // rob to mem block
  io.robio.lsq <> rob.io.lsq

  io.debug_int_rat    .foreach(_ := rat.io.diff_int_rat.get)
  io.debug_fp_rat     .foreach(_ := rat.io.diff_fp_rat.get)
  io.debug_vec_rat    .foreach(_ := rat.io.diff_vec_rat.get)
  io.debug_vconfig_rat.foreach(_ := rat.io.diff_vconfig_rat.get)

  rob.io.debug_ls := io.robio.debug_ls
  rob.io.debugHeadLsIssue := io.robio.robHeadLsIssue
  rob.io.lsTopdownInfo := io.robio.lsTopdownInfo
  io.robio.robDeqPtr := rob.io.robDeqPtr

  io.debugTopDown.fromRob := rob.io.debugTopDown.toCore
  dispatch.io.debugTopDown.fromRob := rob.io.debugTopDown.toDispatch
  dispatch.io.debugTopDown.fromCore := io.debugTopDown.fromCore
  io.debugRolling := rob.io.debugRolling

  io.perfInfo.ctrlInfo.robFull := RegNext(rob.io.robFull)
  io.perfInfo.ctrlInfo.intdqFull := RegNext(intDq.io.dqFull)
  io.perfInfo.ctrlInfo.fpdqFull := RegNext(fpDq.io.dqFull)
  io.perfInfo.ctrlInfo.lsdqFull := RegNext(lsDq.io.dqFull)

  val pfevent = Module(new PFEvent)
  pfevent.io.distribute_csr := RegNext(io.csrCtrl.distribute_csr)
  val csrevents = pfevent.io.hpmevent.slice(8,16)

  val perfinfo = IO(new Bundle(){
    val perfEventsRs      = Input(Vec(params.IqCnt, new PerfEvent))
    val perfEventsEu0     = Input(Vec(6, new PerfEvent))
    val perfEventsEu1     = Input(Vec(6, new PerfEvent))
  })

  val allPerfEvents = Seq(decode, rename, dispatch, intDq, fpDq, lsDq, rob).flatMap(_.getPerf)
  val hpmEvents = allPerfEvents ++ perfinfo.perfEventsEu0 ++ perfinfo.perfEventsEu1 ++ perfinfo.perfEventsRs
  val perfEvents = HPerfMonitor(csrevents, hpmEvents).getPerfEvents
  generatePerfEvent()
}

class CtrlBlockIO()(implicit p: Parameters, params: BackendParams) extends XSBundle {
  val fromTop = new Bundle {
    val hartId = Input(UInt(8.W))
  }
  val toTop = new Bundle {
    val cpuHalt = Output(Bool())
  }
  val frontend = Flipped(new FrontendToCtrlIO())
  val toIssueBlock = new Bundle {
    val flush = ValidIO(new Redirect)
    val allocPregs = Vec(RenameWidth, Output(new ResetPregStateReq))
    val intUops = Vec(dpParams.IntDqDeqWidth, DecoupledIO(new DynInst))
    val vfUops = Vec(dpParams.FpDqDeqWidth, DecoupledIO(new DynInst))
    val memUops = Vec(dpParams.LsDqDeqWidth, DecoupledIO(new DynInst))
    val pcVec = Output(Vec(params.numPcReadPort, UInt(VAddrData().dataWidth.W)))
  }
  val fromDataPath = new Bundle{
    val vtype = Input(new VType)
  }
  val toDataPath = new Bundle {
    val vtypeAddr = Output(UInt(PhyRegIdxWidth.W))
    val flush = ValidIO(new Redirect)
  }
  val toExuBlock = new Bundle {
    val flush = ValidIO(new Redirect)
  }
  val fromWB = new Bundle {
    val wbData = Flipped(MixedVec(params.genWrite2CtrlBundles))
  }
  val redirect = ValidIO(new Redirect)
  val fromMem = new Bundle {
    val stIn = Vec(params.StaCnt, Flipped(ValidIO(new DynInst))) // use storeSetHit, ssid, robIdx
    val violation = Flipped(ValidIO(new Redirect))
  }
  val memLdPcRead = Vec(params.LduCnt, Flipped(new FtqRead(UInt(VAddrBits.W))))
  val memStPcRead = Vec(params.StaCnt, Flipped(new FtqRead(UInt(VAddrBits.W))))

  val csrCtrl = Input(new CustomCSRCtrlIO)
  val robio = new Bundle {
    val csr = new RobCSRIO
    val exception = ValidIO(new ExceptionInfo)
    val lsq = new RobLsqIO
    val lsTopdownInfo = Vec(params.LduCnt, Input(new LsTopdownInfo))
    val debug_ls = Input(new DebugLSIO())
    val robHeadLsIssue = Input(Bool())
    val robDeqPtr = Output(new RobPtr)
  }

  val perfInfo = Output(new Bundle{
    val ctrlInfo = new Bundle {
      val robFull   = Bool()
      val intdqFull = Bool()
      val fpdqFull  = Bool()
      val lsdqFull  = Bool()
    }
  })
  val debug_int_rat     = if (params.debugEn) Some(Vec(32, Output(UInt(PhyRegIdxWidth.W)))) else None
  val debug_fp_rat      = if (params.debugEn) Some(Vec(32, Output(UInt(PhyRegIdxWidth.W)))) else None
  val debug_vec_rat     = if (params.debugEn) Some(Vec(32, Output(UInt(PhyRegIdxWidth.W)))) else None
  val debug_vconfig_rat = if (params.debugEn) Some(Output(UInt(PhyRegIdxWidth.W))) else None // TODO: use me

  val sqCanAccept = Input(Bool())
  val lqCanAccept = Input(Bool())

  val debugTopDown = new Bundle {
    val fromRob = new RobCoreTopDownIO
    val fromCore = new CoreDispatchTopDownIO
  }
  val debugRolling = new RobDebugRollingIO
}

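/**
 * Allocates contiguous index ranges by name.
 * For example, new NamedIndexes(Seq("a" -> 2, "b" -> 1)) gives
 * apply("a") == Seq(0, 1), apply("b") == Seq(2) and maxIdx == 3.
 */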
class NamedIndexes(namedCnt: Seq[(String, Int)]) {
  require(namedCnt.map(_._1).distinct.size == namedCnt.size, "namedCnt should not contain duplicate names")

  val maxIdx = namedCnt.map(_._2).sum
  val nameRangeMap: Map[String, (Int, Int)] = namedCnt.indices.map { i =>
    val begin = namedCnt.slice(0, i).map(_._2).sum
    val end = begin + namedCnt(i)._2
    (namedCnt(i)._1, (begin, end))
  }.toMap

  def apply(name: String): Seq[Int] = {
    require(nameRangeMap.contains(name))
    nameRangeMap(name)._1 until nameRangeMap(name)._2
  }
}
580