// xref: /XiangShan/src/main/scala/xiangshan/backend/Backend.scala (revision 3088616cbf0793407bb68460b2db89b7de80c12a)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/
16
17package xiangshan.backend
18
19import org.chipsalliance.cde.config.Parameters
20import chisel3._
21import chisel3.util._
22import device.MsiInfoBundle
23import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
24import system.HasSoCParameter
25import utility._
26import utils.{HPerfMonitor, HasPerfEvents, PerfEvent}
27import xiangshan._
28import xiangshan.backend.Bundles.{DynInst, IssueQueueIQWakeUpBundle, LoadShouldCancel, MemExuInput, MemExuOutput, VPUCtrlSignals}
29import xiangshan.backend.ctrlblock.{DebugLSIO, LsTopdownInfo}
30import xiangshan.backend.datapath.DataConfig.{IntData, VecData, FpData}
31import xiangshan.backend.datapath.RdConfig.{IntRD, VfRD}
32import xiangshan.backend.datapath.WbConfig._
33import xiangshan.backend.datapath.DataConfig._
34import xiangshan.backend.datapath._
35import xiangshan.backend.dispatch.CoreDispatchTopDownIO
36import xiangshan.backend.exu.ExuBlock
37import xiangshan.backend.fu.vector.Bundles.{VConfig, VType}
38import xiangshan.backend.fu.{FenceIO, FenceToSbuffer, FuConfig, FuType, PFEvent, PerfCounterIO}
39import xiangshan.backend.issue.EntryBundles._
40import xiangshan.backend.issue.{CancelNetwork, Scheduler, SchedulerArithImp, SchedulerImpBase, SchedulerMemImp}
41import xiangshan.backend.rob.{RobCoreTopDownIO, RobDebugRollingIO, RobLsqIO, RobPtr}
42import xiangshan.frontend.{FtqPtr, FtqRead, PreDecodeInfo}
43import xiangshan.mem.{LqPtr, LsqEnqIO, SqPtr}
44
45import scala.collection.mutable
46
/** Diplomacy wrapper of the whole backend.
  *
  * Elaboration-time work done here:
  *   - sanity-check and finalize the [[BackendParams]] configuration,
  *   - bind the shared params into every scheduler / issue-queue / exu config,
  *   - print the resolved configuration for debugging,
  *   - instantiate the lazy sub-modules (ctrl block, schedulers, datapath, exu blocks).
  *
  * The hardware itself is generated by [[BackendImp]].
  */
class Backend(val params: BackendParams)(implicit p: Parameters) extends LazyModule
  with HasXSParameter {

  override def shouldBeInlined: Boolean = false

  // Check read & write port config before anything else is elaborated.
  params.configChecks

  /* Only update the idx in mem-scheduler here
   * Idx in other schedulers can be updated the same way if needed
   *
   * Also note that we filter out the 'stData issue-queues' when counting
   */
  for ((ibp, idx) <- params.memSchdParams.get.issueBlockParams.filter(_.StdCnt == 0).zipWithIndex) {
    ibp.updateIdx(idx)
  }

  println(params.iqWakeUpParams)

  // Propagate the global backend parameters into every scheduler / issue-queue config.
  params.allSchdParams.foreach(_.bindBackendParam(params))
  params.allIssueParams.foreach(_.bindBackendParam(params))

  // Exu configs additionally need their wake-up configs and a unique exu index.
  for ((exuCfg, i) <- params.allExuParams.zipWithIndex) {
    exuCfg.bindBackendParam(params)
    exuCfg.updateIQWakeUpConfigs(params.iqWakeUpParams)
    exuCfg.updateExuIdx(i)
  }

  println("[Backend] ExuConfigs:")
  for (exuCfg <- params.allExuParams) {
    val fuConfigs = exuCfg.fuConfigs
    val wbPortConfigs = exuCfg.wbPortConfigs
    val immType = exuCfg.immType

    println("[Backend]   " +
      s"${exuCfg.name}: " +
      (if (exuCfg.fakeUnit) "fake, " else "") +
      (if (exuCfg.hasLoadFu || exuCfg.hasHyldaFu) s"LdExuIdx(${backendParams.getLdExuIdx(exuCfg)})" else "") +
      s"${fuConfigs.map(_.name).mkString("fu(s): {", ",", "}")}, " +
      s"${wbPortConfigs.mkString("wb: {", ",", "}")}, " +
      s"${immType.map(SelImm.mkString(_)).mkString("imm: {", ",", "}")}, " +
      s"latMax(${exuCfg.latencyValMax}), ${exuCfg.fuLatancySet.mkString("lat: {", ",", "}")}, " +
      s"srcReg(${exuCfg.numRegSrc})"
    )

    // An exu must have a write-back port of a given kind iff one of its FUs writes
    // that register file. `exists` replaces the original `map(...).reduce(_ || _)`,
    // which would throw on an (accidentally) empty fuConfigs list.
    val wbChecks = Seq(
      ("int", wbPortConfigs.exists { case _: IntWB => true; case _ => false }, fuConfigs.exists(_.writeIntRf)),
      ("fp",  wbPortConfigs.exists { case _: FpWB  => true; case _ => false }, fuConfigs.exists(_.writeFpRf)),
      ("vec", wbPortConfigs.exists { case _: VfWB  => true; case _ => false }, fuConfigs.exists(_.writeVecRf))
    )
    for ((kind, hasWbPort, fuWrites) <- wbChecks) {
      require(hasWbPort == fuWrites, s"${exuCfg.name} $kind wb port has no priority")
    }
  }

  println(s"[Backend] all fu configs")
  for (cfg <- FuConfig.allConfigs) {
    println(s"[Backend]   $cfg")
  }

  // Print the register-file read/write port maps. One loop replaces six
  // copy-pasted blocks; the (label, dataConfig) order reproduces the original
  // print order exactly: Int Rd, Int Wb, Fp Rd, Fp Wb, Vf Rd, Vf Wb.
  for ((label, dataCfg) <- Seq(("Int", IntData()), ("Fp", FpData()), ("Vf", VecData()))) {
    println(s"[Backend] $label RdConfigs: ExuName(Priority)")
    for ((port, seq) <- params.getRdPortParams(dataCfg)) {
      println(s"[Backend]   port($port): ${seq.map(x => params.getExuName(x._1) + "(" + x._2.toString + ")").mkString(",")}")
    }

    println(s"[Backend] $label WbConfigs: ExuName(Priority)")
    for ((port, seq) <- params.getWbPortParams(dataCfg)) {
      println(s"[Backend]   port($port): ${seq.map(x => params.getExuName(x._1) + "(" + x._2.toString + ")").mkString(",")}")
    }
  }

  println(s"[Backend] Dispatch Configs:")
  println(s"[Backend] Load IQ enq width(${params.numLoadDp}), Store IQ enq width(${params.numStoreDp})")
  println(s"[Backend] Load DP width(${LSQLdEnqWidth}), Store DP width(${LSQStEnqWidth})")

  params.updateCopyPdestInfo
  println(s"[Backend] copyPdestInfo ${params.copyPdestInfo}")
  // Touch copyNum on every exu param; kept for its (potential) lazy-init side
  // effect — the values themselves are unused, so `foreach` (not `map`) is used.
  params.allExuParams.foreach(_.copyNum)

  // Lazy sub-modules. Instantiation order is kept exactly as before.
  val ctrlBlock = LazyModule(new CtrlBlock(params))
  val pcTargetMem = LazyModule(new PcTargetMem(params))
  val intScheduler = params.intSchdParams.map(x => LazyModule(new Scheduler(x)))
  val fpScheduler = params.fpSchdParams.map(x => LazyModule(new Scheduler(x)))
  val vfScheduler = params.vfSchdParams.map(x => LazyModule(new Scheduler(x)))
  val memScheduler = params.memSchdParams.map(x => LazyModule(new Scheduler(x)))
  val dataPath = LazyModule(new DataPath(params))
  val intExuBlock = params.intSchdParams.map(x => LazyModule(new ExuBlock(x)))
  val fpExuBlock = params.fpSchdParams.map(x => LazyModule(new ExuBlock(x)))
  val vfExuBlock = params.vfSchdParams.map(x => LazyModule(new ExuBlock(x)))
  val wbFuBusyTable = LazyModule(new WbFuBusyTable(params))

  lazy val module = new BackendImp(this)
}
169
/** Hardware implementation of [[Backend]]: instantiates the concrete modules and
  * wires schedulers, datapath, bypass network, exu blocks and write-back path together.
  */
class BackendImp(override val wrapper: Backend)(implicit p: Parameters) extends LazyModuleImp(wrapper)
  with HasXSParameter
  with HasPerfEvents {
  implicit private val params: BackendParams = wrapper.params

  val io = IO(new BackendIO()(p, wrapper.params))

  // Concrete module implementations of the lazy modules created in the wrapper.
  private val ctrlBlock = wrapper.ctrlBlock.module
  private val pcTargetMem = wrapper.pcTargetMem.module
  private val intScheduler: SchedulerImpBase = wrapper.intScheduler.get.module
  private val fpScheduler = wrapper.fpScheduler.get.module
  private val vfScheduler = wrapper.vfScheduler.get.module
  private val memScheduler = wrapper.memScheduler.get.module
  private val dataPath = wrapper.dataPath.module
  private val intExuBlock = wrapper.intExuBlock.get.module
  private val fpExuBlock = wrapper.fpExuBlock.get.module
  private val vfExuBlock = wrapper.vfExuBlock.get.module
  // Plain chisel modules local to the backend implementation.
  private val og2ForVector = Module(new Og2ForVector(params))
  private val bypassNetwork = Module(new BypassNetwork)
  private val wbDataPath = Module(new WbDataPath(params))
  private val wbFuBusyTable = wrapper.wbFuBusyTable.module

  // All wake-up signals produced by the four schedulers, keyed by the producing
  // exu index so each scheduler can look up the sources it subscribes to.
  private val iqWakeUpMappedBundle: Map[Int, ValidIO[IssueQueueIQWakeUpBundle]] = (
    intScheduler.io.toSchedulers.wakeupVec ++
      fpScheduler.io.toSchedulers.wakeupVec ++
      vfScheduler.io.toSchedulers.wakeupVec ++
      memScheduler.io.toSchedulers.wakeupVec
    ).map(x => (x.bits.exuIdx, x)).toMap

  println(s"[Backend] iq wake up keys: ${iqWakeUpMappedBundle.keys}")

  // Write-back FU busy table: schedulers report busy state; the shared table
  // answers each scheduler's reads and the datapath's write-back conflict query.
  wbFuBusyTable.io.in.intSchdBusyTable := intScheduler.io.wbFuBusyTable
  wbFuBusyTable.io.in.fpSchdBusyTable := fpScheduler.io.wbFuBusyTable
  wbFuBusyTable.io.in.vfSchdBusyTable := vfScheduler.io.wbFuBusyTable
  wbFuBusyTable.io.in.memSchdBusyTable := memScheduler.io.wbFuBusyTable
  intScheduler.io.fromWbFuBusyTable.fuBusyTableRead := wbFuBusyTable.io.out.intRespRead
  fpScheduler.io.fromWbFuBusyTable.fuBusyTableRead := wbFuBusyTable.io.out.fpRespRead
  vfScheduler.io.fromWbFuBusyTable.fuBusyTableRead := wbFuBusyTable.io.out.vfRespRead
  memScheduler.io.fromWbFuBusyTable.fuBusyTableRead := wbFuBusyTable.io.out.memRespRead
  dataPath.io.wbConfictRead := wbFuBusyTable.io.out.wbConflictRead

  // Cancel / vector-length status signals shared by several consumers below.
  private val og1Cancel = dataPath.io.og1Cancel
  private val og0Cancel = dataPath.io.og0Cancel
  private val vlIsZero = intExuBlock.io.vlIsZero.get
  private val vlIsVlmax = intExuBlock.io.vlIsVlmax.get

  // Control block wiring: frontend, CSR, ROB and memory-block interfaces.
  ctrlBlock.io.intIQValidNumVec := intScheduler.io.intIQValidNumVec
  ctrlBlock.io.fpIQValidNumVec := fpScheduler.io.fpIQValidNumVec
  ctrlBlock.io.fromTop.hartId := io.fromTop.hartId
  ctrlBlock.io.frontend <> io.frontend
  ctrlBlock.io.fromCSR.toDecode := intExuBlock.io.csrToDecode.get
  ctrlBlock.io.fromWB.wbData <> wbDataPath.io.toCtrlBlock.writeback
  ctrlBlock.io.fromMem.stIn <> io.mem.stIn
  ctrlBlock.io.fromMem.violation <> io.mem.memoryViolation
  ctrlBlock.io.lqCanAccept := io.mem.lqCanAccept
  ctrlBlock.io.sqCanAccept := io.mem.sqCanAccept
  ctrlBlock.io.csrCtrl <> intExuBlock.io.csrio.get.customCtrl
  ctrlBlock.io.robio.csr.intrBitSet := intExuBlock.io.csrio.get.interrupt
  ctrlBlock.io.robio.csr.trapTarget := intExuBlock.io.csrio.get.trapTarget
  ctrlBlock.io.robio.csr.isXRet := intExuBlock.io.csrio.get.isXRet
  ctrlBlock.io.robio.csr.wfiEvent := intExuBlock.io.csrio.get.wfi_event
  ctrlBlock.io.robio.lsq <> io.mem.robLsqIO
  ctrlBlock.io.robio.lsTopdownInfo <> io.mem.lsTopdownInfo
  ctrlBlock.io.robio.debug_ls <> io.mem.debugLS
  // Debug view of LSQ enqueue: accept/resp come from the memory block, while
  // req/needAlloc mirror what the mem scheduler actually sends.
  ctrlBlock.io.debugEnqLsq.canAccept := io.mem.lsqEnqIO.canAccept
  ctrlBlock.io.debugEnqLsq.resp := io.mem.lsqEnqIO.resp
  ctrlBlock.io.debugEnqLsq.req := memScheduler.io.memIO.get.lsqEnqIO.req
  ctrlBlock.io.debugEnqLsq.needAlloc := memScheduler.io.memIO.get.lsqEnqIO.needAlloc
238
  // ---------------- Integer scheduler ----------------
  // Only the int regfile write-back is meaningful here; the fp/vf/v0/vl
  // write-back ports are tied off to zero.
  intScheduler.io.fromTop.hartId := io.fromTop.hartId
  intScheduler.io.fromCtrlBlock.flush := ctrlBlock.io.toIssueBlock.flush
  intScheduler.io.fromDispatch.allocPregs <> ctrlBlock.io.toIssueBlock.allocPregs
  intScheduler.io.fromDispatch.uops <> ctrlBlock.io.toIssueBlock.intUops
  intScheduler.io.intWriteBack := wbDataPath.io.toIntPreg
  intScheduler.io.fpWriteBack := 0.U.asTypeOf(intScheduler.io.fpWriteBack)
  intScheduler.io.vfWriteBack := 0.U.asTypeOf(intScheduler.io.vfWriteBack)
  intScheduler.io.v0WriteBack := 0.U.asTypeOf(intScheduler.io.v0WriteBack)
  intScheduler.io.vlWriteBack := 0.U.asTypeOf(intScheduler.io.vlWriteBack)
  intScheduler.io.fromDataPath.resp := dataPath.io.toIntIQ
  // Each incoming wake-up port is looked up in the global map by its exu index.
  intScheduler.io.fromSchedulers.wakeupVec.foreach { wakeup => wakeup := iqWakeUpMappedBundle(wakeup.bits.exuIdx) }
  intScheduler.io.fromDataPath.og0Cancel := og0Cancel
  intScheduler.io.fromDataPath.og1Cancel := og1Cancel
  intScheduler.io.ldCancel := io.mem.ldCancel
  // Int exus own the first getIntExuRCWriteSize register-cache write indices.
  intScheduler.io.fromDataPath.replaceRCIdx.get := dataPath.io.toWakeupQueueRCIdx.take(params.getIntExuRCWriteSize)
  intScheduler.io.vlWriteBackInfo.vlIsZero := false.B
  intScheduler.io.vlWriteBackInfo.vlIsVlmax := false.B

  // ---------------- Floating-point scheduler ----------------
  fpScheduler.io.fromTop.hartId := io.fromTop.hartId
  fpScheduler.io.fromCtrlBlock.flush := ctrlBlock.io.toIssueBlock.flush
  fpScheduler.io.fromDispatch.allocPregs <> ctrlBlock.io.toIssueBlock.allocPregs
  fpScheduler.io.fromDispatch.uops <> ctrlBlock.io.toIssueBlock.fpUops
  fpScheduler.io.intWriteBack := 0.U.asTypeOf(fpScheduler.io.intWriteBack)
  fpScheduler.io.fpWriteBack := wbDataPath.io.toFpPreg
  fpScheduler.io.vfWriteBack := 0.U.asTypeOf(fpScheduler.io.vfWriteBack)
  fpScheduler.io.v0WriteBack := 0.U.asTypeOf(fpScheduler.io.v0WriteBack)
  fpScheduler.io.vlWriteBack := 0.U.asTypeOf(fpScheduler.io.vlWriteBack)
  fpScheduler.io.fromDataPath.resp := dataPath.io.toFpIQ
  fpScheduler.io.fromSchedulers.wakeupVec.foreach { wakeup => wakeup := iqWakeUpMappedBundle(wakeup.bits.exuIdx) }
  fpScheduler.io.fromDataPath.og0Cancel := og0Cancel
  fpScheduler.io.fromDataPath.og1Cancel := og1Cancel
  fpScheduler.io.ldCancel := io.mem.ldCancel
  fpScheduler.io.vlWriteBackInfo.vlIsZero := false.B
  fpScheduler.io.vlWriteBackInfo.vlIsVlmax := false.B

  // ---------------- Memory scheduler ----------------
  // The mem scheduler listens to every regfile's write-back (loads can wake up
  // consumers of any register class).
  memScheduler.io.fromTop.hartId := io.fromTop.hartId
  memScheduler.io.fromCtrlBlock.flush := ctrlBlock.io.toIssueBlock.flush
  memScheduler.io.fromDispatch.allocPregs <> ctrlBlock.io.toIssueBlock.allocPregs
  memScheduler.io.fromDispatch.uops <> ctrlBlock.io.toIssueBlock.memUops
  memScheduler.io.intWriteBack := wbDataPath.io.toIntPreg
  memScheduler.io.fpWriteBack := wbDataPath.io.toFpPreg
  memScheduler.io.vfWriteBack := wbDataPath.io.toVfPreg
  memScheduler.io.v0WriteBack := wbDataPath.io.toV0Preg
  memScheduler.io.vlWriteBack := wbDataPath.io.toVlPreg
  memScheduler.io.fromMem.get.scommit := io.mem.sqDeq
  memScheduler.io.fromMem.get.lcommit := io.mem.lqDeq
  memScheduler.io.fromMem.get.wakeup := io.mem.wakeup
  memScheduler.io.fromMem.get.sqDeqPtr := io.mem.sqDeqPtr
  memScheduler.io.fromMem.get.lqDeqPtr := io.mem.lqDeqPtr
  memScheduler.io.fromMem.get.sqCancelCnt := io.mem.sqCancelCnt
  memScheduler.io.fromMem.get.lqCancelCnt := io.mem.lqCancelCnt
  memScheduler.io.fromMem.get.stIssuePtr := io.mem.stIssuePtr
  require(memScheduler.io.fromMem.get.memWaitUpdateReq.robIdx.length == io.mem.stIn.length)
  // Store-issue notifications update the mem-wait table, keyed by robIdx.
  memScheduler.io.fromMem.get.memWaitUpdateReq.robIdx.zip(io.mem.stIn).foreach { case (sink, source) =>
    sink.valid := source.valid
    sink.bits  := source.bits.robIdx
  }
  memScheduler.io.fromMem.get.memWaitUpdateReq.sqIdx := DontCare // TODO
  memScheduler.io.fromDataPath.resp := dataPath.io.toMemIQ
  memScheduler.io.fromMem.get.ldaFeedback := io.mem.ldaIqFeedback
  memScheduler.io.fromMem.get.staFeedback := io.mem.staIqFeedback
  memScheduler.io.fromMem.get.hyuFeedback := io.mem.hyuIqFeedback
  memScheduler.io.fromMem.get.vstuFeedback := io.mem.vstuIqFeedback
  memScheduler.io.fromMem.get.vlduFeedback := io.mem.vlduIqFeedback
  memScheduler.io.fromSchedulers.wakeupVec.foreach { wakeup => wakeup := iqWakeUpMappedBundle(wakeup.bits.exuIdx) }
  memScheduler.io.fromDataPath.og0Cancel := og0Cancel
  memScheduler.io.fromDataPath.og1Cancel := og1Cancel
  memScheduler.io.ldCancel := io.mem.ldCancel
  // Mem exus own the last getMemExuRCWriteSize register-cache write indices
  // (complementing the `take` on the int side above).
  memScheduler.io.fromDataPath.replaceRCIdx.get := dataPath.io.toWakeupQueueRCIdx.takeRight(params.getMemExuRCWriteSize)
  memScheduler.io.vlWriteBackInfo.vlIsZero := vlIsZero
  memScheduler.io.vlWriteBackInfo.vlIsVlmax := vlIsVlmax
  memScheduler.io.fromOg2Resp.get := og2ForVector.io.toMemIQOg2Resp

  // ---------------- Vector/FP (vf) scheduler ----------------
  vfScheduler.io.fromTop.hartId := io.fromTop.hartId
  vfScheduler.io.fromCtrlBlock.flush := ctrlBlock.io.toIssueBlock.flush
  vfScheduler.io.fromDispatch.allocPregs <> ctrlBlock.io.toIssueBlock.allocPregs
  vfScheduler.io.fromDispatch.uops <> ctrlBlock.io.toIssueBlock.vfUops
  vfScheduler.io.intWriteBack := 0.U.asTypeOf(vfScheduler.io.intWriteBack)
  vfScheduler.io.fpWriteBack := 0.U.asTypeOf(vfScheduler.io.fpWriteBack)
  vfScheduler.io.vfWriteBack := wbDataPath.io.toVfPreg
  vfScheduler.io.v0WriteBack := wbDataPath.io.toV0Preg
  vfScheduler.io.vlWriteBack := wbDataPath.io.toVlPreg
  vfScheduler.io.fromDataPath.resp := dataPath.io.toVfIQ
  vfScheduler.io.fromSchedulers.wakeupVec.foreach { wakeup => wakeup := iqWakeUpMappedBundle(wakeup.bits.exuIdx) }
  vfScheduler.io.fromDataPath.og0Cancel := og0Cancel
  vfScheduler.io.fromDataPath.og1Cancel := og1Cancel
  vfScheduler.io.ldCancel := io.mem.ldCancel
  vfScheduler.io.vlWriteBackInfo.vlIsZero := vlIsZero
  vfScheduler.io.vlWriteBackInfo.vlIsVlmax := vlIsVlmax
  vfScheduler.io.fromOg2Resp.get := og2ForVector.io.toVfIQOg2Resp

  // ---------------- Datapath: issue inputs ----------------
  dataPath.io.hartId := io.fromTop.hartId
  dataPath.io.flush := ctrlBlock.io.toDataPath.flush

  dataPath.io.fromIntIQ <> intScheduler.io.toDataPathAfterDelay
  dataPath.io.fromFpIQ <> fpScheduler.io.toDataPathAfterDelay
  dataPath.io.fromVfIQ <> vfScheduler.io.toDataPathAfterDelay
  dataPath.io.fromMemIQ <> memScheduler.io.toDataPathAfterDelay

  dataPath.io.ldCancel := io.mem.ldCancel
339
340  println(s"[Backend] wbDataPath.io.toIntPreg: ${wbDataPath.io.toIntPreg.size}, dataPath.io.fromIntWb: ${dataPath.io.fromIntWb.size}")
341  println(s"[Backend] wbDataPath.io.toVfPreg: ${wbDataPath.io.toVfPreg.size}, dataPath.io.fromFpWb: ${dataPath.io.fromVfWb.size}")
  // Datapath write-back inputs come straight from the write-back datapath.
  dataPath.io.fromIntWb := wbDataPath.io.toIntPreg
  dataPath.io.fromFpWb := wbDataPath.io.toFpPreg
  dataPath.io.fromVfWb := wbDataPath.io.toVfPreg
  dataPath.io.fromV0Wb := wbDataPath.io.toV0Preg
  dataPath.io.fromVlWb := wbDataPath.io.toVlPreg
  // Debug RAT snapshots (present only when the debug options enable them).
  dataPath.io.debugIntRat    .foreach(_ := ctrlBlock.io.debug_int_rat.get)
  dataPath.io.debugFpRat     .foreach(_ := ctrlBlock.io.debug_fp_rat.get)
  dataPath.io.debugVecRat    .foreach(_ := ctrlBlock.io.debug_vec_rat.get)
  dataPath.io.debugV0Rat     .foreach(_ := ctrlBlock.io.debug_v0_rat.get)
  dataPath.io.debugVlRat     .foreach(_ := ctrlBlock.io.debug_vl_rat.get)
  dataPath.io.fromBypassNetwork := bypassNetwork.io.toDataPath

  // ---------------- OG2 stage for vector instructions ----------------
  // Only mem issue blocks with needOg2Resp (vector memory) route through og2.
  og2ForVector.io.flush := ctrlBlock.io.toDataPath.flush
  og2ForVector.io.ldCancel := io.mem.ldCancel
  og2ForVector.io.fromOg1VfArith <> dataPath.io.toVecExu
  og2ForVector.io.fromOg1VecMem.zip(dataPath.io.toMemExu.zip(params.memSchdParams.get.issueBlockParams).filter(_._2.needOg2Resp).map(_._1))
    .foreach {
      case (og1Mem, datapathMem) => og1Mem <> datapathMem
    }
  og2ForVector.io.fromOg1ImmInfo := dataPath.io.og1ImmInfo.zip(params.allExuParams).filter(_._2.needOg2).map(_._1)

  // ---------------- Bypass network ----------------
  println(s"[Backend] BypassNetwork OG1 Mem Size: ${bypassNetwork.io.fromDataPath.mem.zip(params.memSchdParams.get.issueBlockParams).filterNot(_._2.needOg2Resp).size}")
  println(s"[Backend] BypassNetwork OG2 Mem Size: ${bypassNetwork.io.fromDataPath.mem.zip(params.memSchdParams.get.issueBlockParams).filter(_._2.needOg2Resp).size}")
  println(s"[Backend] bypassNetwork.io.fromDataPath.mem: ${bypassNetwork.io.fromDataPath.mem.size}, dataPath.io.toMemExu: ${dataPath.io.toMemExu.size}")
  bypassNetwork.io.fromDataPath.int <> dataPath.io.toIntExu
  bypassNetwork.io.fromDataPath.fp <> dataPath.io.toFpExu
  bypassNetwork.io.fromDataPath.vf <> og2ForVector.io.toVfArithExu
  // Mem ports without og2 connect directly from the datapath ...
  bypassNetwork.io.fromDataPath.mem.lazyZip(params.memSchdParams.get.issueBlockParams).lazyZip(dataPath.io.toMemExu).filterNot(_._2.needOg2Resp)
    .map(x => (x._1, x._3)).foreach {
      case (bypassMem, datapathMem) => bypassMem <> datapathMem
    }
  // ... while og2-capable mem ports connect through og2ForVector.
  bypassNetwork.io.fromDataPath.mem.zip(params.memSchdParams.get.issueBlockParams).filter(_._2.needOg2Resp).map(_._1)
    .zip(og2ForVector.io.toVecMemExu).foreach {
      case (bypassMem, og2Mem) => bypassMem <> og2Mem
    }
  // Default imm info from og1, then overridden for og2-routed exus.
  bypassNetwork.io.fromDataPath.immInfo := dataPath.io.og1ImmInfo
  bypassNetwork.io.fromDataPath.immInfo.zip(params.allExuParams).filter(_._2.needOg2).map(_._1)
    .zip(og2ForVector.io.toBypassNetworkImmInfo).foreach {
      case (immInfo, og2ImmInfo) => immInfo := og2ImmInfo
    }
  bypassNetwork.io.fromDataPath.rcData := dataPath.io.toBypassNetworkRCData
  bypassNetwork.io.fromExus.connectExuOutput(_.int)(intExuBlock.io.out)
  bypassNetwork.io.fromExus.connectExuOutput(_.fp)(fpExuBlock.io.out)
  bypassNetwork.io.fromExus.connectExuOutput(_.vf)(vfExuBlock.io.out)

  require(bypassNetwork.io.fromExus.mem.flatten.size == io.mem.writeBack.size,
    s"bypassNetwork.io.fromExus.mem.flatten.size(${bypassNetwork.io.fromExus.mem.flatten.size}: ${bypassNetwork.io.fromExus.mem.map(_.size)}, " +
    s"io.mem.writeback(${io.mem.writeBack.size})"
  )
  // Mem write-back feeds the bypass network; only loads forward an int result.
  bypassNetwork.io.fromExus.mem.flatten.zip(io.mem.writeBack).foreach { case (sink, source) =>
    sink.valid := source.valid
    sink.bits.intWen := source.bits.uop.rfWen && FuType.isLoad(source.bits.uop.fuType)
    sink.bits.pdest := source.bits.uop.pdest
    sink.bits.data := source.bits.data
  }
397
398
  // ---------------- Int exu block ----------------
  // Pipeline-register connect from the bypass network into each int exu.
  // The flush condition depends on which side currently holds the uop:
  // when the input fires, flush/ld-cancel applies to the incoming uop;
  // otherwise it applies to the uop already latched in the exu input.
  intExuBlock.io.flush := ctrlBlock.io.toExuBlock.flush
  for (i <- 0 until intExuBlock.io.in.length) {
    for (j <- 0 until intExuBlock.io.in(i).length) {
      val shouldLdCancel = LoadShouldCancel(bypassNetwork.io.toExus.int(i)(j).bits.loadDependency, io.mem.ldCancel)
      NewPipelineConnect(
        bypassNetwork.io.toExus.int(i)(j), intExuBlock.io.in(i)(j), intExuBlock.io.in(i)(j).fire,
        Mux(
          bypassNetwork.io.toExus.int(i)(j).fire,
          bypassNetwork.io.toExus.int(i)(j).bits.robIdx.needFlush(ctrlBlock.io.toExuBlock.flush) || shouldLdCancel,
          intExuBlock.io.in(i)(j).bits.robIdx.needFlush(ctrlBlock.io.toExuBlock.flush)
        ),
        Option("bypassNetwork2intExuBlock")
      )
    }
  }

  pcTargetMem.io.fromFrontendFtq := io.frontend.fromFtq
  pcTargetMem.io.toDataPath <> dataPath.io.fromPcTargetMem

  // ---------------- CSR inputs ----------------
  // msiInfo/clintTime are registered one cycle; bits are captured only when
  // valid, so stale data is never latched.
  private val csrin = intExuBlock.io.csrin.get
  csrin.hartId := io.fromTop.hartId
  csrin.msiInfo.valid := RegNext(io.fromTop.msiInfo.valid)
  csrin.msiInfo.bits := RegEnable(io.fromTop.msiInfo.bits, io.fromTop.msiInfo.valid)
  csrin.clintTime.valid := RegNext(io.fromTop.clintTime.valid)
  csrin.clintTime.bits := RegEnable(io.fromTop.clintTime.bits, io.fromTop.clintTime.valid)
  csrin.trapInstInfo := ctrlBlock.io.toCSR.trapInstInfo

  // ---------------- CSR <-> ROB / vector state ----------------
  private val csrio = intExuBlock.io.csrio.get
  csrio.hartId := io.fromTop.hartId
  csrio.fpu.fflags := ctrlBlock.io.robio.csr.fflags
  csrio.fpu.isIllegal := false.B // Todo: remove it
  csrio.fpu.dirty_fs := ctrlBlock.io.robio.csr.dirty_fs
  csrio.vpu <> WireDefault(0.U.asTypeOf(csrio.vpu)) // Todo

  // Track the vtype written by the most recent vsetvl-style op, from whichever
  // exu block (int or vf) produced it; int takes priority when both are valid.
  val fromIntExuVsetVType = intExuBlock.io.vtype.getOrElse(0.U.asTypeOf((Valid(new VType))))
  val fromVfExuVsetVType = vfExuBlock.io.vtype.getOrElse(0.U.asTypeOf((Valid(new VType))))
  val fromVsetVType = Mux(fromIntExuVsetVType.valid, fromIntExuVsetVType.bits, fromVfExuVsetVType.bits)
  val vsetvlVType = RegEnable(fromVsetVType, 0.U.asTypeOf(new VType), fromIntExuVsetVType.valid || fromVfExuVsetVType.valid)
  ctrlBlock.io.toDecode.vsetvlVType := vsetvlVType

  val commitVType = ctrlBlock.io.robio.commitVType.vtype
  val hasVsetvl = ctrlBlock.io.robio.commitVType.hasVsetvl
  val vtype = VType.toVtypeStruct(Mux(hasVsetvl, vsetvlVType, commitVType.bits)).asUInt

  // csr not store the value of vl, so when using difftest we assign the value of vl to debugVl
  val debugVl_s0 = WireInit(UInt(VlData().dataWidth.W), 0.U)
  val debugVl_s1 = WireInit(UInt(VlData().dataWidth.W), 0.U)
  debugVl_s0 := dataPath.io.debugVl.getOrElse(0.U.asTypeOf(UInt(VlData().dataWidth.W)))
  debugVl_s1 := RegNext(debugVl_s0)
  csrio.vpu.set_vxsat := ctrlBlock.io.robio.csr.vxsat
  csrio.vpu.set_vstart.valid := ctrlBlock.io.robio.csr.vstart.valid
  csrio.vpu.set_vstart.bits := ctrlBlock.io.robio.csr.vstart.bits
  ctrlBlock.io.toDecode.vstart := csrio.vpu.vstart
  //Todo here need change design
  csrio.vpu.set_vtype.valid := commitVType.valid
  csrio.vpu.set_vtype.bits := ZeroExt(vtype, XLEN)
  csrio.vpu.vl := ZeroExt(debugVl_s1, XLEN)
  csrio.vpu.dirty_vs := ctrlBlock.io.robio.csr.dirty_vs
  csrio.exception := ctrlBlock.io.robio.exception
  csrio.robDeqPtr := ctrlBlock.io.robio.robDeqPtr
  csrio.memExceptionVAddr := io.mem.exceptionAddr.vaddr
  csrio.memExceptionGPAddr := io.mem.exceptionAddr.gpaddr
  csrio.externalInterrupt := RegNext(io.fromTop.externalInterrupt)
  csrio.perf <> io.perf
  csrio.perf.retiredInstr <> ctrlBlock.io.robio.csr.perfinfo.retiredInstr
  csrio.perf.ctrlInfo <> ctrlBlock.io.perfInfo.ctrlInfo
  private val fenceio = intExuBlock.io.fenceio.get
  io.fenceio <> fenceio
467
  // to fpExuBlock
  // Same pipeline-connect pattern as the int exu block above.
  fpExuBlock.io.flush := ctrlBlock.io.toExuBlock.flush
  for (i <- 0 until fpExuBlock.io.in.length) {
    for (j <- 0 until fpExuBlock.io.in(i).length) {
      val shouldLdCancel = LoadShouldCancel(bypassNetwork.io.toExus.fp(i)(j).bits.loadDependency, io.mem.ldCancel)
      NewPipelineConnect(
        bypassNetwork.io.toExus.fp(i)(j), fpExuBlock.io.in(i)(j), fpExuBlock.io.in(i)(j).fire,
        Mux(
          bypassNetwork.io.toExus.fp(i)(j).fire,
          bypassNetwork.io.toExus.fp(i)(j).bits.robIdx.needFlush(ctrlBlock.io.toExuBlock.flush) || shouldLdCancel,
          fpExuBlock.io.in(i)(j).bits.robIdx.needFlush(ctrlBlock.io.toExuBlock.flush)
        ),
        Option("bypassNetwork2fpExuBlock")
      )
    }
  }

  // to vfExuBlock — identical pattern for the vector/fp arithmetic block.
  vfExuBlock.io.flush := ctrlBlock.io.toExuBlock.flush
  for (i <- 0 until vfExuBlock.io.in.size) {
    for (j <- 0 until vfExuBlock.io.in(i).size) {
      val shouldLdCancel = LoadShouldCancel(bypassNetwork.io.toExus.vf(i)(j).bits.loadDependency, io.mem.ldCancel)
      NewPipelineConnect(
        bypassNetwork.io.toExus.vf(i)(j), vfExuBlock.io.in(i)(j), vfExuBlock.io.in(i)(j).fire,
        Mux(
          bypassNetwork.io.toExus.vf(i)(j).fire,
          bypassNetwork.io.toExus.vf(i)(j).bits.robIdx.needFlush(ctrlBlock.io.toExuBlock.flush) || shouldLdCancel,
          vfExuBlock.io.in(i)(j).bits.robIdx.needFlush(ctrlBlock.io.toExuBlock.flush)
        ),
        Option("bypassNetwork2vfExuBlock")
      )

    }
  }

  // Broadcast the FP rounding mode (frm) and fixed-point rounding mode (vxrm)
  // from the CSR to every exu block that uses them.
  intExuBlock.io.frm.foreach(_ := csrio.fpu.frm)
  fpExuBlock.io.frm.foreach(_ := csrio.fpu.frm)
  fpExuBlock.io.vxrm.foreach(_ := csrio.vpu.vxrm)
  vfExuBlock.io.frm.foreach(_ := csrio.fpu.frm)
  vfExuBlock.io.vxrm.foreach(_ := csrio.vpu.vxrm)

  // ---------------- Write-back datapath ----------------
  wbDataPath.io.flush := ctrlBlock.io.redirect
  wbDataPath.io.fromTop.hartId := io.fromTop.hartId
  wbDataPath.io.fromIntExu <> intExuBlock.io.out
  wbDataPath.io.fromFpExu <> fpExuBlock.io.out
  wbDataPath.io.fromVfExu <> vfExuBlock.io.out
  // Memory write-back arrives from outside the backend; unpack each uop field.
  // Optional fields (`foreach`) exist only for exus that can write that regfile.
  wbDataPath.io.fromMemExu.flatten.zip(io.mem.writeBack).foreach { case (sink, source) =>
    sink.valid := source.valid
    source.ready := sink.ready
    // The same data is replicated onto every write-back path of the port.
    sink.bits.data   := VecInit(Seq.fill(sink.bits.params.wbPathNum)(source.bits.data))
    sink.bits.pdest  := source.bits.uop.pdest
    sink.bits.robIdx := source.bits.uop.robIdx
    sink.bits.intWen.foreach(_ := source.bits.uop.rfWen)
    sink.bits.fpWen.foreach(_ := source.bits.uop.fpWen)
    sink.bits.vecWen.foreach(_ := source.bits.uop.vecWen)
    sink.bits.v0Wen.foreach(_ := source.bits.uop.v0Wen)
    sink.bits.vlWen.foreach(_ := source.bits.uop.vlWen)
    sink.bits.exceptionVec.foreach(_ := source.bits.uop.exceptionVec)
    sink.bits.flushPipe.foreach(_ := source.bits.uop.flushPipe)
    sink.bits.replay.foreach(_ := source.bits.uop.replayInst)
    sink.bits.debug := source.bits.debug
    sink.bits.debugInfo := source.bits.uop.debugInfo
    sink.bits.lqIdx.foreach(_ := source.bits.uop.lqIdx)
    sink.bits.sqIdx.foreach(_ := source.bits.uop.sqIdx)
    sink.bits.predecodeInfo.foreach(_ := source.bits.uop.preDecodeInfo)
    // Vector load/store bookkeeping for write-back.
    sink.bits.vls.foreach(x => {
      x.vdIdx := source.bits.vdIdx.get
      x.vdIdxInField := source.bits.vdIdxInField.get
      x.vpu   := source.bits.uop.vpu
      x.oldVdPsrc := source.bits.uop.psrc(2)
      x.isIndexed := VlduType.isIndexed(source.bits.uop.fuOpType)
      x.isMasked := VlduType.isMasked(source.bits.uop.fuOpType)
    })
    sink.bits.trigger.foreach(_ := source.bits.uop.trigger)
  }
542
543  // to mem
544  private val memIssueParams = params.memSchdParams.get.issueBlockParams
545  private val memExuBlocksHasLDU = memIssueParams.map(_.exuBlockParams.map(x => x.hasLoadFu || x.hasHyldaFu))
546  private val memExuBlocksHasVecLoad = memIssueParams.map(_.exuBlockParams.map(x => x.hasVLoadFu))
547  println(s"[Backend] memExuBlocksHasLDU: $memExuBlocksHasLDU")
548  println(s"[Backend] memExuBlocksHasVecLoad: $memExuBlocksHasVecLoad")
549
550  private val toMem = Wire(bypassNetwork.io.toExus.mem.cloneType)
  // Per issue-port hookup between the bypass network and the memory EXU input
  // registers (toMem), plus issue-response feedback to the memory scheduler.
  for (i <- toMem.indices) {
    for (j <- toMem(i).indices) {
      // Cancel this issue if any producer load it depends on was itself cancelled.
      val shouldLdCancel = LoadShouldCancel(bypassNetwork.io.toExus.mem(i)(j).bits.loadDependency, io.mem.ldCancel)
      // For load-capable ports, count consecutive cycles the held uop stays valid
      // without firing; the Counter's wrap output flags a timeout after the full
      // 16-cycle range. The counter restarts whenever a new uop enters from the
      // bypass network. Non-load ports never time out.
      val issueTimeout =
        if (memExuBlocksHasLDU(i)(j))
          Counter(0 until 16, toMem(i)(j).valid && !toMem(i)(j).fire, bypassNetwork.io.toExus.mem(i)(j).fire)._2
        else
          false.B

      // On timeout, report a "block" response for the stuck load so the
      // scheduler can recover/reschedule it.
      if (memScheduler.io.loadFinalIssueResp(i).nonEmpty && memExuBlocksHasLDU(i)(j)) {
        memScheduler.io.loadFinalIssueResp(i)(j).valid := issueTimeout
        memScheduler.io.loadFinalIssueResp(i)(j).bits.fuType := toMem(i)(j).bits.fuType
        memScheduler.io.loadFinalIssueResp(i)(j).bits.resp := RespType.block
        memScheduler.io.loadFinalIssueResp(i)(j).bits.robIdx := toMem(i)(j).bits.robIdx
        memScheduler.io.loadFinalIssueResp(i)(j).bits.uopIdx.foreach(_ := toMem(i)(j).bits.vpu.get.vuopIdx)
        memScheduler.io.loadFinalIssueResp(i)(j).bits.sqIdx.foreach(_ := toMem(i)(j).bits.sqIdx.get)
        memScheduler.io.loadFinalIssueResp(i)(j).bits.lqIdx.foreach(_ := toMem(i)(j).bits.lqIdx.get)
      }

      // Pipeline stage bypassNetwork -> mem EXUs. The flush condition is chosen
      // per side of the register: when a new uop is entering, check redirect /
      // load-cancel on the incoming bits; otherwise check redirect / timeout on
      // the currently held bits.
      NewPipelineConnect(
        bypassNetwork.io.toExus.mem(i)(j), toMem(i)(j), toMem(i)(j).fire,
        Mux(
          bypassNetwork.io.toExus.mem(i)(j).fire,
          bypassNetwork.io.toExus.mem(i)(j).bits.robIdx.needFlush(ctrlBlock.io.toExuBlock.flush) || shouldLdCancel,
          toMem(i)(j).bits.robIdx.needFlush(ctrlBlock.io.toExuBlock.flush) || issueTimeout
        ),
        Option("bypassNetwork2toMemExus")
      )

      // Scalar loads: report "success" the cycle the uop fires into the mem EXU.
      if (memScheduler.io.memAddrIssueResp(i).nonEmpty && memExuBlocksHasLDU(i)(j)) {
        memScheduler.io.memAddrIssueResp(i)(j).valid := toMem(i)(j).fire && FuType.isLoad(toMem(i)(j).bits.fuType)
        memScheduler.io.memAddrIssueResp(i)(j).bits.fuType := toMem(i)(j).bits.fuType
        memScheduler.io.memAddrIssueResp(i)(j).bits.robIdx := toMem(i)(j).bits.robIdx
        memScheduler.io.memAddrIssueResp(i)(j).bits.sqIdx.foreach(_ := toMem(i)(j).bits.sqIdx.get)
        memScheduler.io.memAddrIssueResp(i)(j).bits.lqIdx.foreach(_ := toMem(i)(j).bits.lqIdx.get)
        memScheduler.io.memAddrIssueResp(i)(j).bits.resp := RespType.success // for load inst, firing at toMem means issuing successfully
      }

      // Vector loads: same success-on-fire response, keyed on the vector-load
      // fuOpType. (The `match` is just an irrefutable binding of the port.)
      if (memScheduler.io.vecLoadIssueResp(i).nonEmpty && memExuBlocksHasVecLoad(i)(j)) {
        memScheduler.io.vecLoadIssueResp(i)(j) match {
          case resp =>
            resp.valid := toMem(i)(j).fire && VlduType.isVecLd(toMem(i)(j).bits.fuOpType)
            resp.bits.fuType := toMem(i)(j).bits.fuType
            resp.bits.robIdx := toMem(i)(j).bits.robIdx
            resp.bits.uopIdx.get := toMem(i)(j).bits.vpu.get.vuopIdx
            resp.bits.sqIdx.get := toMem(i)(j).bits.sqIdx.get
            resp.bits.lqIdx.get := toMem(i)(j).bits.lqIdx.get
            resp.bits.resp := RespType.success
        }
        // Debug-only: keep the response visible in the emitted design.
        if (backendParams.debugEn){
          dontTouch(memScheduler.io.vecLoadIssueResp(i)(j))
        }
      }
    }
  }
606
  // ROB flush (redirect) broadcast to the MemBlock.
  io.mem.redirect := ctrlBlock.io.redirect
  // Flatten the per-IQ issue ports and repack each issued uop into the
  // MemExuInput bundle expected by the MemBlock. Optional source fields fall
  // back to zero/false when the producing issue port does not carry them.
  io.mem.issueUops.zip(toMem.flatten).foreach { case (sink, source) =>
    // Runtime-configurable switch (Constantin record) for memory dependence
    // prediction; when disabled, all MDP-related fields are forced inactive.
    val enableMdp = Constantin.createRecord("EnableMdp", true)
    sink.valid := source.valid
    source.ready := sink.ready
    sink.bits.iqIdx              := source.bits.iqIdx
    sink.bits.isFirstIssue       := source.bits.isFirstIssue
    // Zero-initialize uop/src first, then overwrite the fields that exist on
    // this source port; anything not assigned below stays zero.
    sink.bits.uop                := 0.U.asTypeOf(sink.bits.uop)
    sink.bits.src                := 0.U.asTypeOf(sink.bits.src)
    sink.bits.src.zip(source.bits.src).foreach { case (l, r) => l := r}
    sink.bits.uop.fuType         := source.bits.fuType
    sink.bits.uop.fuOpType       := source.bits.fuOpType
    sink.bits.uop.imm            := source.bits.imm
    sink.bits.uop.robIdx         := source.bits.robIdx
    sink.bits.uop.pdest          := source.bits.pdest
    sink.bits.uop.rfWen          := source.bits.rfWen.getOrElse(false.B)
    sink.bits.uop.fpWen          := source.bits.fpWen.getOrElse(false.B)
    sink.bits.uop.vecWen         := source.bits.vecWen.getOrElse(false.B)
    sink.bits.uop.v0Wen          := source.bits.v0Wen.getOrElse(false.B)
    sink.bits.uop.vlWen          := source.bits.vlWen.getOrElse(false.B)
    sink.bits.uop.flushPipe      := source.bits.flushPipe.getOrElse(false.B)
    sink.bits.uop.pc             := source.bits.pc.getOrElse(0.U)
    // MDP (memory dependence prediction) fields, gated by the runtime switch.
    sink.bits.uop.loadWaitBit    := Mux(enableMdp, source.bits.loadWaitBit.getOrElse(false.B), false.B)
    sink.bits.uop.waitForRobIdx  := Mux(enableMdp, source.bits.waitForRobIdx.getOrElse(0.U.asTypeOf(new RobPtr)), 0.U.asTypeOf(new RobPtr))
    sink.bits.uop.storeSetHit    := Mux(enableMdp, source.bits.storeSetHit.getOrElse(false.B), false.B)
    sink.bits.uop.loadWaitStrict := Mux(enableMdp, source.bits.loadWaitStrict.getOrElse(false.B), false.B)
    sink.bits.uop.ssid           := Mux(enableMdp, source.bits.ssid.getOrElse(0.U(SSIDWidth.W)), 0.U(SSIDWidth.W))
    sink.bits.uop.lqIdx          := source.bits.lqIdx.getOrElse(0.U.asTypeOf(new LqPtr))
    sink.bits.uop.sqIdx          := source.bits.sqIdx.getOrElse(0.U.asTypeOf(new SqPtr))
    sink.bits.uop.ftqPtr         := source.bits.ftqIdx.getOrElse(0.U.asTypeOf(new FtqPtr))
    sink.bits.uop.ftqOffset      := source.bits.ftqOffset.getOrElse(0.U)
    sink.bits.uop.debugInfo      := source.bits.perfDebugInfo
    sink.bits.uop.vpu            := source.bits.vpu.getOrElse(0.U.asTypeOf(new VPUCtrlSignals))
    sink.bits.uop.preDecodeInfo  := source.bits.preDecode.getOrElse(0.U.asTypeOf(new PreDecodeInfo))
    sink.bits.uop.numLsElem      := source.bits.numLsElem.getOrElse(0.U) // Todo: remove this bundle, keep only the one below
    sink.bits.flowNum.foreach(_  := source.bits.numLsElem.get)
  }
  // Load-to-load fast wakeup hints from the memory scheduler.
  io.mem.loadFastMatch := memScheduler.io.toMem.get.loadFastMatch.map(_.fastMatch)
  io.mem.loadFastImm := memScheduler.io.toMem.get.loadFastMatch.map(_.fastImm)
  // CSR-derived controls and fence state forwarded to the MemBlock.
  io.mem.tlbCsr := csrio.tlb
  io.mem.csrCtrl := csrio.customCtrl
  io.mem.sfence := fenceio.sfence
  // Classify the exception currently reported by the ROB for the MemBlock.
  io.mem.isStoreException := CommitType.lsInstIsStore(ctrlBlock.io.robio.exception.bits.commitType)
  io.mem.isVlsException := ctrlBlock.io.robio.exception.bits.vls
  require(io.mem.loadPcRead.size == params.LduCnt)
  // PC read ports: each load / store / hybrid issue port looks up the PC of
  // the uop being issued (via ctrlBlock) using its ftqPtr/ftqOffset.
  io.mem.loadPcRead.zipWithIndex.foreach { case (loadPcRead, i) =>
    loadPcRead := ctrlBlock.io.memLdPcRead(i).data
    ctrlBlock.io.memLdPcRead(i).valid := io.mem.issueLda(i).valid
    ctrlBlock.io.memLdPcRead(i).ptr := io.mem.issueLda(i).bits.uop.ftqPtr
    ctrlBlock.io.memLdPcRead(i).offset := io.mem.issueLda(i).bits.uop.ftqOffset
  }

  io.mem.storePcRead.zipWithIndex.foreach { case (storePcRead, i) =>
    storePcRead := ctrlBlock.io.memStPcRead(i).data
    ctrlBlock.io.memStPcRead(i).valid := io.mem.issueSta(i).valid
    ctrlBlock.io.memStPcRead(i).ptr := io.mem.issueSta(i).bits.uop.ftqPtr
    ctrlBlock.io.memStPcRead(i).offset := io.mem.issueSta(i).bits.uop.ftqOffset
  }

  // NOTE(review): hybrid units use only the load-side issue port (issueHylda)
  // for the PC lookup here — confirm that is intended for store-path uops.
  io.mem.hyuPcRead.zipWithIndex.foreach( { case (hyuPcRead, i) =>
    hyuPcRead := ctrlBlock.io.memHyPcRead(i).data
    ctrlBlock.io.memHyPcRead(i).valid := io.mem.issueHylda(i).valid
    ctrlBlock.io.memHyPcRead(i).ptr := io.mem.issueHylda(i).bits.uop.ftqPtr
    ctrlBlock.io.memHyPcRead(i).offset := io.mem.issueHylda(i).bits.uop.ftqOffset
  })

  // Top-down analysis: asserted when any firing mem uop is the current ROB head.
  ctrlBlock.io.robio.robHeadLsIssue := io.mem.issueUops.map(deq => deq.fire && deq.bits.uop.robIdx === ctrlBlock.io.robio.robDeqPtr).reduce(_ || _)
674
  // mem io: LSQ enqueue handshake and ROB <-> LSQ bidirectional channel.
  io.mem.lsqEnqIO <> memScheduler.io.memIO.get.lsqEnqIO
  io.mem.robLsqIO <> ctrlBlock.io.robio.lsq

  // Fence / TLB-CSR / custom-CSR controls exported to the frontend.
  io.frontendSfence := fenceio.sfence
  io.frontendTlbCsr := csrio.tlb
  io.frontendCsrCtrl := csrio.customCtrl

  // TLB CSR state exported at the top level.
  io.tlb <> csrio.tlb

  io.csrCustomCtrl := csrio.customCtrl

  io.toTop.cpuHalted := false.B // TODO: implement cpu halt

  // Top-down debug counters: ROB-side info out, core-dispatch info in.
  io.debugTopDown.fromRob := ctrlBlock.io.debugTopDown.fromRob
  ctrlBlock.io.debugTopDown.fromCore := io.debugTopDown.fromCore

  io.debugRolling := ctrlBlock.io.debugRolling

  // Debug-only: keep these interfaces in the emitted design for probing.
  if(backendParams.debugEn) {
    dontTouch(memScheduler.io)
    dontTouch(dataPath.io.toMemExu)
    dontTouch(wbDataPath.io.fromMemExu)
  }
699
700  // reset tree
701  if (p(DebugOptionsKey).ResetGen) {
702    val rightResetTree = ResetGenNode(Seq(
703      ModuleNode(dataPath),
704      ModuleNode(intExuBlock),
705      ModuleNode(fpExuBlock),
706      ModuleNode(vfExuBlock),
707      ModuleNode(bypassNetwork),
708      ModuleNode(wbDataPath)
709    ))
710    val leftResetTree = ResetGenNode(Seq(
711      ModuleNode(pcTargetMem),
712      ModuleNode(intScheduler),
713      ModuleNode(fpScheduler),
714      ModuleNode(vfScheduler),
715      ModuleNode(memScheduler),
716      ModuleNode(og2ForVector),
717      ModuleNode(wbFuBusyTable),
718      ResetGenNode(Seq(
719        ModuleNode(ctrlBlock),
720        ResetGenNode(Seq(
721          CellNode(io.frontendReset)
722        ))
723      ))
724    ))
725    ResetGen(leftResetTree, reset, sim = false)
726    ResetGen(rightResetTree, reset, sim = false)
727  } else {
728    io.frontendReset := DontCare
729  }
730
  // perf events
  // Distribute the CSR-programmed HPM event selectors; the backend consumes
  // event-selector slots 8 to 15.
  val pfevent = Module(new PFEvent)
  pfevent.io.distribute_csr := RegNext(csrio.customCtrl.distribute_csr)
  val csrevents = pfevent.io.hpmevent.slice(8,16)

  // Gather raw perf events from the control block and every scheduler.
  val ctrlBlockPerf    = ctrlBlock.getPerfEvents
  val intSchedulerPerf = intScheduler.asInstanceOf[SchedulerArithImp].getPerfEvents
  val fpSchedulerPerf  = fpScheduler.asInstanceOf[SchedulerArithImp].getPerfEvents
  val vecSchedulerPerf = vfScheduler.asInstanceOf[SchedulerArithImp].getPerfEvents
  val memSchedulerPerf = memScheduler.asInstanceOf[SchedulerMemImp].getPerfEvents

  // Placeholder for backend-local events (currently none).
  val perfBackend  = Seq()
  // let index = 0 be no event
  val allPerfEvents = Seq(("noEvent", 0.U)) ++ ctrlBlockPerf ++ intSchedulerPerf ++ fpSchedulerPerf ++ vecSchedulerPerf ++ memSchedulerPerf ++ perfBackend


  // Elaboration-time dump of the event-index encoding, for matching counter
  // values back to event names.
  if (printEventCoding) {
    for (((name, inc), i) <- allPerfEvents.zipWithIndex) {
      println("backend perfEvents Set", name, inc, i)
    }
  }

  // Reduce all events through the hierarchical perf monitor, driven by the
  // CSR-selected events, and hand the resulting counters to the CSR block.
  val allPerfInc = allPerfEvents.map(_._2.asTypeOf(new PerfEvent))
  val perfEvents = HPerfMonitor(csrevents, allPerfInc).getPerfEvents
  csrio.perf.perfEventsBackend := VecInit(perfEvents.map(_._2.asTypeOf(new PerfEvent)))
  generatePerfEvent()
757}
758
/** IO between the Backend and the MemBlock.
  *
  * Direction convention: the bundle is viewed from the Backend side, so plain
  * `Output`/`DecoupledIO` fields drive the MemBlock while `Input`/`Flipped`
  * fields are driven by it.
  */
class BackendMemIO(implicit p: Parameters, params: BackendParams) extends XSBundle {
  // Since fast load replay always use load unit 0, Backend flips two load port to avoid conflicts
  val flippedLda = true
  // params alias
  private val LoadQueueSize = VirtualLoadQueueSize
  // In/Out // Todo: split it into one-direction bundle
  val lsqEnqIO = Flipped(new LsqEnqIO)
  val robLsqIO = new RobLsqIO
  // Per-unit issue-queue feedback from the memory pipelines.
  val ldaIqFeedback = Vec(params.LduCnt, Flipped(new MemRSFeedbackIO))
  val staIqFeedback = Vec(params.StaCnt, Flipped(new MemRSFeedbackIO))
  val hyuIqFeedback = Vec(params.HyuCnt, Flipped(new MemRSFeedbackIO))
  val vstuIqFeedback = Flipped(Vec(params.VstuCnt, new MemRSFeedbackIO(isVector = true)))
  val vlduIqFeedback = Flipped(Vec(params.VlduCnt, new MemRSFeedbackIO(isVector = true)))
  // Load cancel and wakeup notifications from the load pipelines.
  val ldCancel = Vec(params.LdExuCnt, Input(new LoadCancelIO))
  val wakeup = Vec(params.LdExuCnt, Flipped(Valid(new DynInst)))
  // PC lookup results for issued load/store/hybrid uops (read from the FTQ).
  val loadPcRead = Vec(params.LduCnt, Output(UInt(VAddrBits.W)))
  val storePcRead = Vec(params.StaCnt, Output(UInt(VAddrBits.W)))
  val hyuPcRead = Vec(params.HyuCnt, Output(UInt(VAddrBits.W)))
  // Input
  // Writeback channels from the memory execution units.
  val writebackLda = Vec(params.LduCnt, Flipped(DecoupledIO(new MemExuOutput)))
  val writebackSta = Vec(params.StaCnt, Flipped(DecoupledIO(new MemExuOutput)))
  val writebackStd = Vec(params.StdCnt, Flipped(DecoupledIO(new MemExuOutput)))
  val writebackHyuLda = Vec(params.HyuCnt, Flipped(DecoupledIO(new MemExuOutput)))
  val writebackHyuSta = Vec(params.HyuCnt, Flipped(DecoupledIO(new MemExuOutput)))
  val writebackVldu = Vec(params.VlduCnt, Flipped(DecoupledIO(new MemExuOutput(true))))

  val s3_delayed_load_error = Input(Vec(LoadPipelineWidth, Bool()))
  // Store issue notifications and memory-ordering violation redirect.
  val stIn = Input(Vec(params.StaExuCnt, ValidIO(new DynInst())))
  val memoryViolation = Flipped(ValidIO(new Redirect))
  // Faulting virtual / guest-physical address of the current mem exception.
  val exceptionAddr = Input(new Bundle {
    val vaddr = UInt(VAddrBits.W)
    val gpaddr = UInt(GPAddrBits.W)
  })
  // Per-cycle dequeue counts and dequeue pointers of the store/load queues.
  val sqDeq = Input(UInt(log2Ceil(EnsbufferWidth + 1).W))
  val lqDeq = Input(UInt(log2Up(CommitWidth + 1).W))
  val sqDeqPtr = Input(new SqPtr)
  val lqDeqPtr = Input(new LqPtr)

  // Entries freed by redirect/flush in the load/store queues.
  val lqCancelCnt = Input(UInt(log2Up(VirtualLoadQueueSize + 1).W))
  val sqCancelCnt = Input(UInt(log2Up(StoreQueueSize + 1).W))

  val lqCanAccept = Input(Bool())
  val sqCanAccept = Input(Bool())

  val otherFastWakeup = Flipped(Vec(params.LduCnt + params.HyuCnt, ValidIO(new DynInst)))
  val stIssuePtr = Input(new SqPtr())

  val debugLS = Flipped(Output(new DebugLSIO))

  val lsTopdownInfo = Vec(params.LduCnt + params.HyuCnt, Flipped(Output(new LsTopdownInfo)))
  // Output
  val redirect = ValidIO(new Redirect)   // rob flush MemBlock
  // Issue ports toward each class of memory execution unit.
  val issueLda = MixedVec(Seq.fill(params.LduCnt)(DecoupledIO(new MemExuInput())))
  val issueSta = MixedVec(Seq.fill(params.StaCnt)(DecoupledIO(new MemExuInput())))
  val issueStd = MixedVec(Seq.fill(params.StdCnt)(DecoupledIO(new MemExuInput())))
  val issueHylda = MixedVec(Seq.fill(params.HyuCnt)(DecoupledIO(new MemExuInput())))
  val issueHysta = MixedVec(Seq.fill(params.HyuCnt)(DecoupledIO(new MemExuInput())))
  val issueVldu = MixedVec(Seq.fill(params.VlduCnt)(DecoupledIO(new MemExuInput(true))))

  // Load-to-load fast path: per-LDU match vector and immediate.
  val loadFastMatch = Vec(params.LduCnt, Output(UInt(params.LduCnt.W)))
  val loadFastImm   = Vec(params.LduCnt, Output(UInt(12.W))) // Imm_I

  // CSR/fence state and exception classification forwarded to the MemBlock.
  val tlbCsr = Output(new TlbCsrBundle)
  val csrCtrl = Output(new CustomCSRCtrlIO)
  val sfence = Output(new SfenceBundle)
  val isStoreException = Output(Bool())
  val isVlsException = Output(Bool())

  // ATTENTION: The issue ports' sequence order should be the same as IQs' deq config
  private [backend] def issueUops: Seq[DecoupledIO[MemExuInput]] = {
    issueSta ++
      issueHylda ++ issueHysta ++
      issueLda ++
      issueVldu ++
      issueStd
  }.toSeq

  // ATTENTION: The writeback ports' sequence order should be the same as IQs' deq config
  private [backend] def writeBack: Seq[DecoupledIO[MemExuOutput]] = {
    writebackSta ++
      writebackHyuLda ++ writebackHyuSta ++
      writebackLda ++
      writebackVldu ++
      writebackStd
  }
}
845
/** Signals driven from the SoC top into the Backend: hart identity, external
  * interrupts, MSI delivery info, and the CLINT time value.
  */
class TopToBackendBundle(implicit p: Parameters) extends XSBundle {
  val hartId            = Output(UInt(hartIdLen.W))
  val externalInterrupt = Output(new ExternalInterruptIO)
  val msiInfo           = Output(ValidIO(new MsiInfoBundle))
  val clintTime         = Output(ValidIO(UInt(64.W)))
}
852
/** Signals driven from the Backend up to the SoC top.
  * Note: cpuHalted is currently tied to false in Backend (halt unimplemented).
  */
class BackendToTopBundle extends Bundle {
  val cpuHalted = Output(Bool())
}
856
/** Top-level IO of the Backend: bundles toward the SoC top, the frontend, and
  * the MemBlock, plus CSR/TLB state exports and debug/top-down channels.
  */
class BackendIO(implicit p: Parameters, params: BackendParams) extends XSBundle with HasSoCParameter {
  val fromTop = Flipped(new TopToBackendBundle)

  val toTop = new BackendToTopBundle

  val fenceio = new FenceIO
  // Todo: merge these bundles into BackendFrontendIO
  val frontend = Flipped(new FrontendToCtrlIO)
  val frontendSfence = Output(new SfenceBundle)
  val frontendCsrCtrl = Output(new CustomCSRCtrlIO)
  val frontendTlbCsr = Output(new TlbCsrBundle)
  // Reset driven into the frontend (staged via the backend reset tree when
  // ResetGen is enabled, otherwise DontCare).
  val frontendReset = Output(Reset())

  val mem = new BackendMemIO

  val perf = Input(new PerfCounterIO)

  val tlb = Output(new TlbCsrBundle)

  val csrCustomCtrl = Output(new CustomCSRCtrlIO)

  // Top-down performance analysis hooks (ROB out, core dispatch in).
  val debugTopDown = new Bundle {
    val fromRob = new RobCoreTopDownIO
    val fromCore = new CoreDispatchTopDownIO
  }
  val debugRolling = new RobDebugRollingIO
}
884