xref: /XiangShan/src/main/scala/xiangshan/backend/Backend.scala (revision 6dbb4e08d0a2fec5d507ecb3e87745164ded1f8e)
package xiangshan.backend

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
import utility.{Constantin, ZeroExt}
import xiangshan._
import xiangshan.backend.Bundles.{DynInst, IssueQueueIQWakeUpBundle, LoadShouldCancel, MemExuInput, MemExuOutput, VPUCtrlSignals}
import xiangshan.backend.ctrlblock.{DebugLSIO, LsTopdownInfo}
import xiangshan.backend.datapath.DataConfig.{IntData, VecData}
import xiangshan.backend.datapath.RdConfig.{IntRD, VfRD}
import xiangshan.backend.datapath.WbConfig._
import xiangshan.backend.datapath._
import xiangshan.backend.dispatch.CoreDispatchTopDownIO
import xiangshan.backend.exu.ExuBlock
import xiangshan.backend.fu.vector.Bundles.{VConfig, VType}
import xiangshan.backend.fu.{FenceIO, FenceToSbuffer, FuConfig, FuType, PerfCounterIO}
import xiangshan.backend.issue.EntryBundles._
import xiangshan.backend.issue.{CancelNetwork, Scheduler, SchedulerImpBase}
import xiangshan.backend.rob.{RobCoreTopDownIO, RobDebugRollingIO, RobLsqIO, RobPtr}
import xiangshan.frontend.{FtqPtr, FtqRead, PreDecodeInfo}
import xiangshan.mem.{LqPtr, LsqEnqIO, SqPtr}
import scala.collection.mutable

class Backend(val params: BackendParams)(implicit p: Parameters) extends LazyModule
  with HasXSParameter {

  override def shouldBeInlined: Boolean = false

  /* Only the issue-queue indices in the mem-scheduler are updated here;
   * indices in other schedulers can be updated the same way if needed.
   *
   * Also note that the 'stData issue-queues' are filtered out when counting.
   */
  for ((ibp, idx) <- params.memSchdParams.get.issueBlockParams.filter(iq => iq.StdCnt == 0).zipWithIndex) {
    ibp.updateIdx(idx)
  }

  println(params.iqWakeUpParams)

  for ((schdCfg, i) <- params.allSchdParams.zipWithIndex) {
    schdCfg.bindBackendParam(params)
  }

  for ((iqCfg, i) <- params.allIssueParams.zipWithIndex) {
    iqCfg.bindBackendParam(params)
  }

  for ((exuCfg, i) <- params.allExuParams.zipWithIndex) {
    exuCfg.bindBackendParam(params)
    exuCfg.updateIQWakeUpConfigs(params.iqWakeUpParams)
    exuCfg.updateExuIdx(i)
  }

  println("[Backend] ExuConfigs:")
  for (exuCfg <- params.allExuParams) {
    val fuConfigs = exuCfg.fuConfigs
    val wbPortConfigs = exuCfg.wbPortConfigs
    val immType = exuCfg.immType

    println("[Backend]   " +
      s"${exuCfg.name}: " +
      (if (exuCfg.fakeUnit) "fake, " else "") +
      (if (exuCfg.hasLoadFu || exuCfg.hasHyldaFu) s"LdExuIdx(${backendParams.getLdExuIdx(exuCfg)})" else "") +
      s"${fuConfigs.map(_.name).mkString("fu(s): {", ",", "}")}, " +
      s"${wbPortConfigs.mkString("wb: {", ",", "}")}, " +
      s"${immType.map(SelImm.mkString(_)).mkString("imm: {", ",", "}")}, " +
      s"latMax(${exuCfg.latencyValMax}), ${exuCfg.fuLatancySet.mkString("lat: {", ",", "}")}, " +
      s"srcReg(${exuCfg.numRegSrc})"
    )
    require(
      wbPortConfigs.collectFirst { case x: IntWB => x }.nonEmpty ==
        fuConfigs.map(_.writeIntRf).reduce(_ || _),
      s"${exuCfg.name} int wb port has no priority"
    )
    require(
      wbPortConfigs.collectFirst { case x: VfWB => x }.nonEmpty ==
        fuConfigs.map(x => x.writeFpRf || x.writeVecRf).reduce(_ || _),
      s"${exuCfg.name} vec wb port has no priority"
    )
  }

  println(s"[Backend] all fu configs")
  for (cfg <- FuConfig.allConfigs) {
    println(s"[Backend]   $cfg")
  }

  println(s"[Backend] Int RdConfigs: ExuName(Priority)")
  for ((port, seq) <- params.getRdPortParams(IntData())) {
    println(s"[Backend]   port($port): ${seq.map(x => params.getExuName(x._1) + "(" + x._2.toString + ")").mkString(",")}")
  }

  println(s"[Backend] Int WbConfigs: ExuName(Priority)")
  for ((port, seq) <- params.getWbPortParams(IntData())) {
    println(s"[Backend]   port($port): ${seq.map(x => params.getExuName(x._1) + "(" + x._2.toString + ")").mkString(",")}")
  }

  println(s"[Backend] Vf RdConfigs: ExuName(Priority)")
  for ((port, seq) <- params.getRdPortParams(VecData())) {
    println(s"[Backend]   port($port): ${seq.map(x => params.getExuName(x._1) + "(" + x._2.toString + ")").mkString(",")}")
  }

  println(s"[Backend] Vf WbConfigs: ExuName(Priority)")
  for ((port, seq) <- params.getWbPortParams(VecData())) {
    println(s"[Backend]   port($port): ${seq.map(x => params.getExuName(x._1) + "(" + x._2.toString + ")").mkString(",")}")
  }

  println(s"[Backend] Dispatch Configs:")
  println(s"[Backend] Load IQ enq width(${params.numLoadDp}), Store IQ enq width(${params.numStoreDp})")
  println(s"[Backend] Load DP width(${LSQLdEnqWidth}), Store DP width(${LSQStEnqWidth})")

  params.updateCopyPdestInfo
  println(s"[Backend] copyPdestInfo ${params.copyPdestInfo}")
  params.allExuParams.map(_.copyNum)
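  // Lazy submodules of the backend: control block, PC target memory, per-domain schedulers,
  // data path, exu blocks and the write-back FU busy table.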
  val ctrlBlock = LazyModule(new CtrlBlock(params))
  val pcTargetMem = LazyModule(new PcTargetMem(params))
  val intScheduler = params.intSchdParams.map(x => LazyModule(new Scheduler(x)))
  val vfScheduler = params.vfSchdParams.map(x => LazyModule(new Scheduler(x)))
  val memScheduler = params.memSchdParams.map(x => LazyModule(new Scheduler(x)))
  val dataPath = LazyModule(new DataPath(params))
  val intExuBlock = params.intSchdParams.map(x => LazyModule(new ExuBlock(x)))
  val vfExuBlock = params.vfSchdParams.map(x => LazyModule(new ExuBlock(x)))
  val wbFuBusyTable = LazyModule(new WbFuBusyTable(params))

  lazy val module = new BackendImp(this)
}

class BackendImp(override val wrapper: Backend)(implicit p: Parameters) extends LazyModuleImp(wrapper)
  with HasXSParameter {
  implicit private val params = wrapper.params

  val io = IO(new BackendIO()(p, wrapper.params))

  private val ctrlBlock = wrapper.ctrlBlock.module
  private val pcTargetMem = wrapper.pcTargetMem.module
  private val intScheduler: SchedulerImpBase = wrapper.intScheduler.get.module
  private val vfScheduler = wrapper.vfScheduler.get.module
  private val memScheduler = wrapper.memScheduler.get.module
  private val dataPath = wrapper.dataPath.module
  private val intExuBlock = wrapper.intExuBlock.get.module
  private val vfExuBlock = wrapper.vfExuBlock.get.module
  private val bypassNetwork = Module(new BypassNetwork)
  private val wbDataPath = Module(new WbDataPath(params))
  private val wbFuBusyTable = wrapper.wbFuBusyTable.module

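  // Collect the wake-up signals produced by all schedulers into one map keyed by the source exu index,
  // so each scheduler can pick out the wake-ups it listens to below.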
  private val iqWakeUpMappedBundle: Map[Int, ValidIO[IssueQueueIQWakeUpBundle]] = (
    intScheduler.io.toSchedulers.wakeupVec ++
      vfScheduler.io.toSchedulers.wakeupVec ++
      memScheduler.io.toSchedulers.wakeupVec
    ).map(x => (x.bits.exuIdx, x)).toMap

  println(s"[Backend] iq wake up keys: ${iqWakeUpMappedBundle.keys}")

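  // Exchange function-unit busy information between the schedulers and the shared write-back busy table.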
  wbFuBusyTable.io.in.intSchdBusyTable := intScheduler.io.wbFuBusyTable
  wbFuBusyTable.io.in.vfSchdBusyTable := vfScheduler.io.wbFuBusyTable
  wbFuBusyTable.io.in.memSchdBusyTable := memScheduler.io.wbFuBusyTable
  intScheduler.io.fromWbFuBusyTable.fuBusyTableRead := wbFuBusyTable.io.out.intRespRead
  vfScheduler.io.fromWbFuBusyTable.fuBusyTableRead := wbFuBusyTable.io.out.vfRespRead
  memScheduler.io.fromWbFuBusyTable.fuBusyTableRead := wbFuBusyTable.io.out.memRespRead
  dataPath.io.wbConfictRead := wbFuBusyTable.io.out.wbConflictRead

  private val og1CancelOH: UInt = dataPath.io.og1CancelOH
  private val og0CancelOH: UInt = dataPath.io.og0CancelOH
  private val cancelToBusyTable = dataPath.io.cancelToBusyTable

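  // Control block wiring: frontend interface, write-back results, memory status and ROB/CSR signals.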
  ctrlBlock.io.IQValidNumVec := intScheduler.io.IQValidNumVec
  ctrlBlock.io.fromTop.hartId := io.fromTop.hartId
  ctrlBlock.io.frontend <> io.frontend
  ctrlBlock.io.fromWB.wbData <> wbDataPath.io.toCtrlBlock.writeback
  ctrlBlock.io.fromMem.stIn <> io.mem.stIn
  ctrlBlock.io.fromMem.violation <> io.mem.memoryViolation
  ctrlBlock.io.lqCanAccept := io.mem.lqCanAccept
  ctrlBlock.io.sqCanAccept := io.mem.sqCanAccept
  ctrlBlock.io.csrCtrl <> intExuBlock.io.csrio.get.customCtrl
  ctrlBlock.io.robio.csr.intrBitSet := intExuBlock.io.csrio.get.interrupt
  ctrlBlock.io.robio.csr.trapTarget := intExuBlock.io.csrio.get.trapTarget
  ctrlBlock.io.robio.csr.isXRet := intExuBlock.io.csrio.get.isXRet
  ctrlBlock.io.robio.csr.wfiEvent := intExuBlock.io.csrio.get.wfi_event
  ctrlBlock.io.robio.lsq <> io.mem.robLsqIO
  ctrlBlock.io.robio.lsTopdownInfo <> io.mem.lsTopdownInfo
  ctrlBlock.io.robio.debug_ls <> io.mem.debugLS
  ctrlBlock.perfinfo := DontCare // TODO: Implement backend hpm
  ctrlBlock.io.debugEnqLsq.canAccept := io.mem.lsqEnqIO.canAccept
  ctrlBlock.io.debugEnqLsq.resp := io.mem.lsqEnqIO.resp
  ctrlBlock.io.debugEnqLsq.req := memScheduler.io.memIO.get.lsqEnqIO.req
  ctrlBlock.io.debugEnqLsq.needAlloc := memScheduler.io.memIO.get.lsqEnqIO.needAlloc


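  // Integer scheduler: dispatched uops, write-back wake-ups, data-path responses and cancel signals.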
  intScheduler.io.fromTop.hartId := io.fromTop.hartId
  intScheduler.io.fromCtrlBlock.flush := ctrlBlock.io.toIssueBlock.flush
  intScheduler.io.fromDispatch.allocPregs <> ctrlBlock.io.toIssueBlock.allocPregs
  intScheduler.io.fromDispatch.uops <> ctrlBlock.io.toIssueBlock.intUops
  intScheduler.io.intWriteBack := wbDataPath.io.toIntPreg
  intScheduler.io.vfWriteBack := 0.U.asTypeOf(intScheduler.io.vfWriteBack)
  intScheduler.io.fromDataPath.resp := dataPath.io.toIntIQ
  intScheduler.io.fromSchedulers.wakeupVec.foreach { wakeup => wakeup := iqWakeUpMappedBundle(wakeup.bits.exuIdx) }
  intScheduler.io.fromDataPath.og0Cancel := og0CancelOH
  intScheduler.io.fromDataPath.og1Cancel := og1CancelOH
  intScheduler.io.ldCancel := io.mem.ldCancel
  intScheduler.io.fromDataPath.cancelToBusyTable := cancelToBusyTable

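  // Memory scheduler: in addition to the common connections, it receives LSQ commit/cancel counts,
  // deq pointers, issue-queue feedback and store-wait updates from the memory block.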
  memScheduler.io.fromTop.hartId := io.fromTop.hartId
  memScheduler.io.fromCtrlBlock.flush := ctrlBlock.io.toIssueBlock.flush
  memScheduler.io.fromDispatch.allocPregs <> ctrlBlock.io.toIssueBlock.allocPregs
  memScheduler.io.fromDispatch.uops <> ctrlBlock.io.toIssueBlock.memUops
  memScheduler.io.intWriteBack := wbDataPath.io.toIntPreg
  memScheduler.io.vfWriteBack := wbDataPath.io.toVfPreg
  memScheduler.io.fromMem.get.scommit := io.mem.sqDeq
  memScheduler.io.fromMem.get.lcommit := io.mem.lqDeq
  memScheduler.io.fromMem.get.wakeup := io.mem.wakeup
  memScheduler.io.fromMem.get.sqDeqPtr := io.mem.sqDeqPtr
  memScheduler.io.fromMem.get.lqDeqPtr := io.mem.lqDeqPtr
  memScheduler.io.fromMem.get.sqCancelCnt := io.mem.sqCancelCnt
  memScheduler.io.fromMem.get.lqCancelCnt := io.mem.lqCancelCnt
  memScheduler.io.fromMem.get.stIssuePtr := io.mem.stIssuePtr
  require(memScheduler.io.fromMem.get.memWaitUpdateReq.robIdx.length == io.mem.stIn.length)
  memScheduler.io.fromMem.get.memWaitUpdateReq.robIdx.zip(io.mem.stIn).foreach { case (sink, source) =>
    sink.valid := source.valid
    sink.bits  := source.bits.robIdx
  }
  memScheduler.io.fromMem.get.memWaitUpdateReq.sqIdx := DontCare // TODO
  memScheduler.io.fromDataPath.resp := dataPath.io.toMemIQ
  memScheduler.io.fromMem.get.ldaFeedback := io.mem.ldaIqFeedback
  memScheduler.io.fromMem.get.staFeedback := io.mem.staIqFeedback
  memScheduler.io.fromMem.get.hyuFeedback := io.mem.hyuIqFeedback
  memScheduler.io.fromSchedulers.wakeupVec.foreach { wakeup => wakeup := iqWakeUpMappedBundle(wakeup.bits.exuIdx) }
  memScheduler.io.fromDataPath.og0Cancel := og0CancelOH
  memScheduler.io.fromDataPath.og1Cancel := og1CancelOH
  memScheduler.io.ldCancel := io.mem.ldCancel
  memScheduler.io.fromDataPath.cancelToBusyTable := cancelToBusyTable

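  // Vector/FP scheduler: mirrors the integer scheduler, but only listens to vector/FP write-back.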
  vfScheduler.io.fromTop.hartId := io.fromTop.hartId
  vfScheduler.io.fromCtrlBlock.flush := ctrlBlock.io.toIssueBlock.flush
  vfScheduler.io.fromDispatch.allocPregs <> ctrlBlock.io.toIssueBlock.allocPregs
  vfScheduler.io.fromDispatch.uops <> ctrlBlock.io.toIssueBlock.vfUops
  vfScheduler.io.intWriteBack := 0.U.asTypeOf(vfScheduler.io.intWriteBack)
  vfScheduler.io.vfWriteBack := wbDataPath.io.toVfPreg
  vfScheduler.io.fromDataPath.resp := dataPath.io.toVfIQ
  vfScheduler.io.fromSchedulers.wakeupVec.foreach { wakeup => wakeup := iqWakeUpMappedBundle(wakeup.bits.exuIdx) }
  vfScheduler.io.fromDataPath.og0Cancel := og0CancelOH
  vfScheduler.io.fromDataPath.og1Cancel := og1CancelOH
  vfScheduler.io.ldCancel := io.mem.ldCancel
  vfScheduler.io.fromDataPath.cancelToBusyTable := cancelToBusyTable

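  // Data path: issue requests from the schedulers, write-back results into the register files,
  // and debug RAT snapshots from the control block.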
  dataPath.io.hartId := io.fromTop.hartId
  dataPath.io.flush := ctrlBlock.io.toDataPath.flush

  dataPath.io.fromIntIQ <> intScheduler.io.toDataPathAfterDelay
  dataPath.io.fromVfIQ <> vfScheduler.io.toDataPathAfterDelay
  dataPath.io.fromMemIQ <> memScheduler.io.toDataPathAfterDelay

  dataPath.io.ldCancel := io.mem.ldCancel

  println(s"[Backend] wbDataPath.io.toIntPreg: ${wbDataPath.io.toIntPreg.size}, dataPath.io.fromIntWb: ${dataPath.io.fromIntWb.size}")
256  println(s"[Backend] wbDataPath.io.toVfPreg: ${wbDataPath.io.toVfPreg.size}, dataPath.io.fromFpWb: ${dataPath.io.fromVfWb.size}")
  dataPath.io.fromIntWb := wbDataPath.io.toIntPreg
  dataPath.io.fromVfWb := wbDataPath.io.toVfPreg
  dataPath.io.debugIntRat    .foreach(_ := ctrlBlock.io.debug_int_rat.get)
  dataPath.io.debugFpRat     .foreach(_ := ctrlBlock.io.debug_fp_rat.get)
  dataPath.io.debugVecRat    .foreach(_ := ctrlBlock.io.debug_vec_rat.get)
  dataPath.io.debugVconfigRat.foreach(_ := ctrlBlock.io.debug_vconfig_rat.get)

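  // Bypass network: takes read operands and immediate info from the data path and forwards
  // freshly produced results from the exu blocks and the memory write-back ports.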
  bypassNetwork.io.fromDataPath.int <> dataPath.io.toIntExu
  bypassNetwork.io.fromDataPath.vf <> dataPath.io.toFpExu
  bypassNetwork.io.fromDataPath.mem <> dataPath.io.toMemExu
  bypassNetwork.io.fromDataPath.immInfo := dataPath.io.og1ImmInfo
  bypassNetwork.io.fromExus.connectExuOutput(_.int)(intExuBlock.io.out)
  bypassNetwork.io.fromExus.connectExuOutput(_.vf)(vfExuBlock.io.out)

  require(bypassNetwork.io.fromExus.mem.flatten.size == io.mem.writeBack.size,
    s"bypassNetwork.io.fromExus.mem.flatten.size(${bypassNetwork.io.fromExus.mem.flatten.size}: ${bypassNetwork.io.fromExus.mem.map(_.size)}, " +
    s"io.mem.writeback(${io.mem.writeBack.size})"
  )
  bypassNetwork.io.fromExus.mem.flatten.zip(io.mem.writeBack).foreach { case (sink, source) =>
    sink.valid := source.valid
    sink.bits.pdest := source.bits.uop.pdest
    sink.bits.data := source.bits.data
  }


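  // Integer exu block: pipeline the bypass-network outputs into the exus; an in-flight uop is killed
  // when it needs to be flushed by a redirect or is cancelled by a load (ldCancel).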
  intExuBlock.io.flush := ctrlBlock.io.toExuBlock.flush
  for (i <- 0 until intExuBlock.io.in.length) {
    for (j <- 0 until intExuBlock.io.in(i).length) {
      val shouldLdCancel = LoadShouldCancel(bypassNetwork.io.toExus.int(i)(j).bits.loadDependency, io.mem.ldCancel)
      NewPipelineConnect(
        bypassNetwork.io.toExus.int(i)(j), intExuBlock.io.in(i)(j), intExuBlock.io.in(i)(j).fire,
        Mux(
          bypassNetwork.io.toExus.int(i)(j).fire,
          bypassNetwork.io.toExus.int(i)(j).bits.robIdx.needFlush(ctrlBlock.io.toExuBlock.flush) || shouldLdCancel,
          intExuBlock.io.in(i)(j).bits.robIdx.needFlush(ctrlBlock.io.toExuBlock.flush)
        ),
        Option("intExuBlock2bypassNetwork")
      )
    }
  }

  pcTargetMem.io.fromFrontendFtq := io.frontend.fromFtq
  pcTargetMem.io.toDataPath <> dataPath.io.fromPcTargetMem

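  // CSR unit: floating-point flags, vector CSR state, exceptions, interrupts and performance counters.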
  private val csrio = intExuBlock.io.csrio.get
  csrio.hartId := io.fromTop.hartId
  csrio.fpu.fflags := ctrlBlock.io.robio.csr.fflags
  csrio.fpu.isIllegal := false.B // Todo: remove it
  csrio.fpu.dirty_fs := ctrlBlock.io.robio.csr.dirty_fs
  csrio.vpu <> 0.U.asTypeOf(csrio.vpu) // Todo

  val debugVconfig = dataPath.io.debugVconfig match {
    case Some(x) => dataPath.io.debugVconfig.get.asTypeOf(new VConfig)
    case None => 0.U.asTypeOf(new VConfig)
  }
  val debugVtype = VType.toVtypeStruct(debugVconfig.vtype).asUInt
  val debugVl = debugVconfig.vl
  csrio.vpu.set_vxsat := ctrlBlock.io.robio.csr.vxsat
  csrio.vpu.set_vstart.valid := ctrlBlock.io.robio.csr.vstart.valid
  csrio.vpu.set_vstart.bits := ctrlBlock.io.robio.csr.vstart.bits
  csrio.vpu.set_vtype.valid := ctrlBlock.io.robio.csr.vcsrFlag
  // Todo: the design here needs to be changed
  csrio.vpu.set_vtype.bits := ZeroExt(debugVtype, XLEN)
  csrio.vpu.set_vl.valid := ctrlBlock.io.robio.csr.vcsrFlag
  csrio.vpu.set_vl.bits := ZeroExt(debugVl, XLEN)
  csrio.exception := ctrlBlock.io.robio.exception
  csrio.memExceptionVAddr := io.mem.exceptionVAddr
  csrio.externalInterrupt := io.fromTop.externalInterrupt
  csrio.distributedUpdate(0) := io.mem.csrDistributedUpdate
  csrio.distributedUpdate(1) := io.frontendCsrDistributedUpdate
  csrio.perf <> io.perf
  csrio.perf.retiredInstr <> ctrlBlock.io.robio.csr.perfinfo.retiredInstr
  csrio.perf.ctrlInfo <> ctrlBlock.io.perfInfo.ctrlInfo
  csrio.perf.perfEventsCtrl <> ctrlBlock.getPerf
  private val fenceio = intExuBlock.io.fenceio.get
  io.fenceio <> fenceio
  fenceio.disableSfence := csrio.disableSfence

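  // Vector/FP exu block: same pipeline connection as the integer side, plus vstart from the CSR unit.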
  vfExuBlock.io.flush := ctrlBlock.io.toExuBlock.flush
  for (i <- 0 until vfExuBlock.io.in.size) {
    for (j <- 0 until vfExuBlock.io.in(i).size) {
      val shouldLdCancel = LoadShouldCancel(bypassNetwork.io.toExus.vf(i)(j).bits.loadDependency, io.mem.ldCancel)
      NewPipelineConnect(
        bypassNetwork.io.toExus.vf(i)(j), vfExuBlock.io.in(i)(j), vfExuBlock.io.in(i)(j).fire,
        Mux(
          bypassNetwork.io.toExus.vf(i)(j).fire,
          bypassNetwork.io.toExus.vf(i)(j).bits.robIdx.needFlush(ctrlBlock.io.toExuBlock.flush) || shouldLdCancel,
          vfExuBlock.io.in(i)(j).bits.robIdx.needFlush(ctrlBlock.io.toExuBlock.flush)
        ),
        Option("vfExuBlock2bypassNetwork")
      )

      vfExuBlock.io.in(i)(j).bits.vpu.foreach(_.vstart := csrio.vpu.vstart)
    }
  }

  intExuBlock.io.frm.foreach(_ := csrio.fpu.frm)
  vfExuBlock.io.frm.foreach(_ := csrio.fpu.frm)
  vfExuBlock.io.vxrm.foreach(_ := csrio.vpu.vxrm)

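  // Write-back data path: gather results from the int/vf exu blocks and repack the memory
  // write-back ports (MemExuOutput) into the backend write-back bundle.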
  wbDataPath.io.flush := ctrlBlock.io.redirect
  wbDataPath.io.fromTop.hartId := io.fromTop.hartId
  wbDataPath.io.fromIntExu <> intExuBlock.io.out
  wbDataPath.io.fromVfExu <> vfExuBlock.io.out
  wbDataPath.io.fromMemExu.flatten.zip(io.mem.writeBack).foreach { case (sink, source) =>
    sink.valid := source.valid
    source.ready := sink.ready
    sink.bits.data   := source.bits.data
    sink.bits.pdest  := source.bits.uop.pdest
    sink.bits.robIdx := source.bits.uop.robIdx
    sink.bits.intWen.foreach(_ := source.bits.uop.rfWen)
    sink.bits.fpWen.foreach(_ := source.bits.uop.fpWen)
    sink.bits.vecWen.foreach(_ := source.bits.uop.vecWen)
    sink.bits.exceptionVec.foreach(_ := source.bits.uop.exceptionVec)
    sink.bits.flushPipe.foreach(_ := source.bits.uop.flushPipe)
    sink.bits.replay.foreach(_ := source.bits.uop.replayInst)
    sink.bits.debug := source.bits.debug
    sink.bits.debugInfo := source.bits.uop.debugInfo
    sink.bits.lqIdx.foreach(_ := source.bits.uop.lqIdx)
    sink.bits.sqIdx.foreach(_ := source.bits.uop.sqIdx)
    sink.bits.predecodeInfo.foreach(_ := source.bits.uop.preDecodeInfo)
    sink.bits.vls.foreach(x => {
      x.vdIdx := source.bits.vdIdx.get
      x.vdIdxInField := source.bits.vdIdxInField.get
      x.vpu   := source.bits.uop.vpu
      x.oldVdPsrc := source.bits.uop.psrc(2)
      x.isIndexed := VlduType.isIndexed(source.bits.uop.fuOpType)
      x.isMasked := VlduType.isMasked(source.bits.uop.fuOpType)
    })
    sink.bits.trigger.foreach(_ := source.bits.uop.trigger)
  }

  // to mem
  private val memIssueParams = params.memSchdParams.get.issueBlockParams
  private val memExuBlocksHasLDU = memIssueParams.map(_.exuBlockParams.map(x => x.hasLoadFu || x.hasHyldaFu))
  println(s"[Backend] memExuBlocksHasLDU: $memExuBlocksHasLDU")

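  // Pipeline the bypass-network outputs towards the memory exus. A load-capable port whose uop sits
  // un-fired in the pipeline register for 16 cycles is reported back to the memory scheduler as a
  // blocked final issue response (RespType.block); a fired load is reported as a successful address
  // issue (RespType.success).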
  private val toMem = Wire(bypassNetwork.io.toExus.mem.cloneType)
  for (i <- toMem.indices) {
    for (j <- toMem(i).indices) {
      val shouldLdCancel = LoadShouldCancel(bypassNetwork.io.toExus.mem(i)(j).bits.loadDependency, io.mem.ldCancel)
      val issueTimeout =
        if (memExuBlocksHasLDU(i)(j))
          Counter(0 until 16, toMem(i)(j).valid && !toMem(i)(j).fire, bypassNetwork.io.toExus.mem(i)(j).fire)._2
        else
          false.B

      if (memScheduler.io.loadFinalIssueResp(i).nonEmpty && memExuBlocksHasLDU(i)(j)) {
        memScheduler.io.loadFinalIssueResp(i)(j).valid := issueTimeout
        memScheduler.io.loadFinalIssueResp(i)(j).bits.fuType := toMem(i)(j).bits.fuType
        memScheduler.io.loadFinalIssueResp(i)(j).bits.resp := RespType.block
        memScheduler.io.loadFinalIssueResp(i)(j).bits.robIdx := toMem(i)(j).bits.robIdx
        memScheduler.io.loadFinalIssueResp(i)(j).bits.uopIdx.foreach(_ := toMem(i)(j).bits.vpu.get.vuopIdx)
      }

      NewPipelineConnect(
        bypassNetwork.io.toExus.mem(i)(j), toMem(i)(j), toMem(i)(j).fire,
        Mux(
          bypassNetwork.io.toExus.mem(i)(j).fire,
          bypassNetwork.io.toExus.mem(i)(j).bits.robIdx.needFlush(ctrlBlock.io.toExuBlock.flush) || shouldLdCancel,
          toMem(i)(j).bits.robIdx.needFlush(ctrlBlock.io.toExuBlock.flush) || issueTimeout
        ),
        Option("bypassNetwork2toMemExus")
      )

      if (memScheduler.io.memAddrIssueResp(i).nonEmpty && memExuBlocksHasLDU(i)(j)) {
        memScheduler.io.memAddrIssueResp(i)(j).valid := toMem(i)(j).fire && FuType.isLoad(toMem(i)(j).bits.fuType)
        memScheduler.io.memAddrIssueResp(i)(j).bits.fuType := toMem(i)(j).bits.fuType
        memScheduler.io.memAddrIssueResp(i)(j).bits.robIdx := toMem(i)(j).bits.robIdx
        memScheduler.io.memAddrIssueResp(i)(j).bits.resp := RespType.success // for load inst, firing at toMem means issuing successfully
      }
    }
  }

  io.mem.redirect := ctrlBlock.io.redirect
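  // Repack the issued uops into MemExuInput for the memory block. Memory-dependence-prediction
  // fields are zeroed when the EnableMdp constant is turned off.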
  io.mem.issueUops.zip(toMem.flatten).foreach { case (sink, source) =>
    val enableMdp = Constantin.createRecord("EnableMdp", true.B)(0)
    sink.valid := source.valid
    source.ready := sink.ready
    sink.bits.iqIdx              := source.bits.iqIdx
    sink.bits.isFirstIssue       := source.bits.isFirstIssue
    sink.bits.uop                := 0.U.asTypeOf(sink.bits.uop)
    sink.bits.src                := 0.U.asTypeOf(sink.bits.src)
    sink.bits.src.zip(source.bits.src).foreach { case (l, r) => l := r}
    sink.bits.uop.fuType         := source.bits.fuType
    sink.bits.uop.fuOpType       := source.bits.fuOpType
    sink.bits.uop.imm            := source.bits.imm
    sink.bits.uop.robIdx         := source.bits.robIdx
    sink.bits.uop.pdest          := source.bits.pdest
    sink.bits.uop.rfWen          := source.bits.rfWen.getOrElse(false.B)
    sink.bits.uop.fpWen          := source.bits.fpWen.getOrElse(false.B)
    sink.bits.uop.vecWen         := source.bits.vecWen.getOrElse(false.B)
    sink.bits.uop.flushPipe      := source.bits.flushPipe.getOrElse(false.B)
    sink.bits.uop.pc             := source.bits.pc.getOrElse(0.U)
    sink.bits.uop.loadWaitBit    := Mux(enableMdp, source.bits.loadWaitBit.getOrElse(false.B), false.B)
    sink.bits.uop.waitForRobIdx  := Mux(enableMdp, source.bits.waitForRobIdx.getOrElse(0.U.asTypeOf(new RobPtr)), 0.U.asTypeOf(new RobPtr))
    sink.bits.uop.storeSetHit    := Mux(enableMdp, source.bits.storeSetHit.getOrElse(false.B), false.B)
    sink.bits.uop.loadWaitStrict := Mux(enableMdp, source.bits.loadWaitStrict.getOrElse(false.B), false.B)
    sink.bits.uop.ssid           := Mux(enableMdp, source.bits.ssid.getOrElse(0.U(SSIDWidth.W)), 0.U(SSIDWidth.W))
    sink.bits.uop.lqIdx          := source.bits.lqIdx.getOrElse(0.U.asTypeOf(new LqPtr))
    sink.bits.uop.sqIdx          := source.bits.sqIdx.getOrElse(0.U.asTypeOf(new SqPtr))
    sink.bits.uop.ftqPtr         := source.bits.ftqIdx.getOrElse(0.U.asTypeOf(new FtqPtr))
    sink.bits.uop.ftqOffset      := source.bits.ftqOffset.getOrElse(0.U)
    sink.bits.uop.debugInfo      := source.bits.perfDebugInfo
    sink.bits.uop.vpu            := source.bits.vpu.getOrElse(0.U.asTypeOf(new VPUCtrlSignals))
    sink.bits.uop.preDecodeInfo  := source.bits.preDecode.getOrElse(0.U.asTypeOf(new PreDecodeInfo))
    sink.bits.uop.numLsElem      := source.bits.numLsElem.getOrElse(0.U) // Todo: remove this bundle, keep only the one below
    sink.bits.flowNum.foreach(_  := source.bits.numLsElem.get)
  }
  io.mem.loadFastMatch := memScheduler.io.toMem.get.loadFastMatch.map(_.fastMatch)
  io.mem.loadFastImm := memScheduler.io.toMem.get.loadFastMatch.map(_.fastImm)
  io.mem.tlbCsr := csrio.tlb
  io.mem.csrCtrl := csrio.customCtrl
  io.mem.sfence := fenceio.sfence
  io.mem.isStoreException := CommitType.lsInstIsStore(ctrlBlock.io.robio.exception.bits.commitType)
  io.mem.isVlsException := ctrlBlock.io.robio.exception.bits.vls
  require(io.mem.loadPcRead.size == params.LduCnt)
  io.mem.loadPcRead.zipWithIndex.foreach { case (loadPcRead, i) =>
    loadPcRead := ctrlBlock.io.memLdPcRead(i).data
    ctrlBlock.io.memLdPcRead(i).vld := io.mem.issueLda(i).valid
    ctrlBlock.io.memLdPcRead(i).ptr := io.mem.issueLda(i).bits.uop.ftqPtr
    ctrlBlock.io.memLdPcRead(i).offset := io.mem.issueLda(i).bits.uop.ftqOffset
  }

  io.mem.storePcRead.zipWithIndex.foreach { case (storePcRead, i) =>
    storePcRead := ctrlBlock.io.memStPcRead(i).data
    ctrlBlock.io.memStPcRead(i).vld := io.mem.issueSta(i).valid
    ctrlBlock.io.memStPcRead(i).ptr := io.mem.issueSta(i).bits.uop.ftqPtr
    ctrlBlock.io.memStPcRead(i).offset := io.mem.issueSta(i).bits.uop.ftqOffset
  }

  io.mem.hyuPcRead.zipWithIndex.foreach( { case (hyuPcRead, i) =>
    hyuPcRead := ctrlBlock.io.memHyPcRead(i).data
    ctrlBlock.io.memHyPcRead(i).vld := io.mem.issueHylda(i).valid
    ctrlBlock.io.memHyPcRead(i).ptr := io.mem.issueHylda(i).bits.uop.ftqPtr
    ctrlBlock.io.memHyPcRead(i).offset := io.mem.issueHylda(i).bits.uop.ftqOffset
  })

  ctrlBlock.io.robio.robHeadLsIssue := io.mem.issueUops.map(deq => deq.fire && deq.bits.uop.robIdx === ctrlBlock.io.robio.robDeqPtr).reduce(_ || _)

  // mem io
  io.mem.lsqEnqIO <> memScheduler.io.memIO.get.lsqEnqIO
  io.mem.robLsqIO <> ctrlBlock.io.robio.lsq

  io.frontendSfence := fenceio.sfence
  io.frontendTlbCsr := csrio.tlb
  io.frontendCsrCtrl := csrio.customCtrl

  io.tlb <> csrio.tlb

  io.csrCustomCtrl := csrio.customCtrl

  io.toTop.cpuHalted := false.B // TODO: implement cpu halt

  io.debugTopDown.fromRob := ctrlBlock.io.debugTopDown.fromRob
  ctrlBlock.io.debugTopDown.fromCore := io.debugTopDown.fromCore

  io.debugRolling := ctrlBlock.io.debugRolling

  if(backendParams.debugEn) {
    dontTouch(memScheduler.io)
    dontTouch(dataPath.io.toMemExu)
    dontTouch(wbDataPath.io.fromMemExu)
  }
}

class BackendMemIO(implicit p: Parameters, params: BackendParams) extends XSBundle {
  // Since fast load replay always uses load unit 0, Backend flips the two load ports to avoid conflicts
  val flippedLda = true
  // params alias
  private val LoadQueueSize = VirtualLoadQueueSize
  // In/Out // Todo: split it into one-direction bundle
  val lsqEnqIO = Flipped(new LsqEnqIO)
  val robLsqIO = new RobLsqIO
  val ldaIqFeedback = Vec(params.LduCnt, Flipped(new MemRSFeedbackIO))
  val staIqFeedback = Vec(params.StaCnt, Flipped(new MemRSFeedbackIO))
  val hyuIqFeedback = Vec(params.HyuCnt, Flipped(new MemRSFeedbackIO))
  val ldCancel = Vec(params.LdExuCnt, Flipped(new LoadCancelIO))
  val wakeup = Vec(params.LdExuCnt, Flipped(Valid(new DynInst)))
  val loadPcRead = Vec(params.LduCnt, Output(UInt(VAddrBits.W)))
  val storePcRead = Vec(params.StaCnt, Output(UInt(VAddrBits.W)))
  val hyuPcRead = Vec(params.HyuCnt, Output(UInt(VAddrBits.W)))
  // Input
  val writebackLda = Vec(params.LduCnt, Flipped(DecoupledIO(new MemExuOutput)))
  val writebackSta = Vec(params.StaCnt, Flipped(DecoupledIO(new MemExuOutput)))
  val writebackStd = Vec(params.StdCnt, Flipped(DecoupledIO(new MemExuOutput)))
  val writebackHyuLda = Vec(params.HyuCnt, Flipped(DecoupledIO(new MemExuOutput)))
  val writebackHyuSta = Vec(params.HyuCnt, Flipped(DecoupledIO(new MemExuOutput)))
  val writebackVldu = Vec(params.VlduCnt, Flipped(DecoupledIO(new MemExuOutput(true))))

  val s3_delayed_load_error = Input(Vec(LoadPipelineWidth, Bool()))
  val stIn = Input(Vec(params.StaExuCnt, ValidIO(new DynInst())))
  val memoryViolation = Flipped(ValidIO(new Redirect))
  val exceptionVAddr = Input(UInt(VAddrBits.W))
  val sqDeq = Input(UInt(log2Ceil(EnsbufferWidth + 1).W))
  val lqDeq = Input(UInt(log2Up(CommitWidth + 1).W))
  val sqDeqPtr = Input(new SqPtr)
  val lqDeqPtr = Input(new LqPtr)

  val lqCancelCnt = Input(UInt(log2Up(VirtualLoadQueueSize + 1).W))
  val sqCancelCnt = Input(UInt(log2Up(StoreQueueSize + 1).W))

  val lqCanAccept = Input(Bool())
  val sqCanAccept = Input(Bool())

  val otherFastWakeup = Flipped(Vec(params.LduCnt + params.HyuCnt, ValidIO(new DynInst)))
  val stIssuePtr = Input(new SqPtr())

  val csrDistributedUpdate = Flipped(new DistributedCSRUpdateReq)

  val debugLS = Flipped(Output(new DebugLSIO))

  val lsTopdownInfo = Vec(params.LduCnt + params.HyuCnt, Flipped(Output(new LsTopdownInfo)))
  // Output
  val redirect = ValidIO(new Redirect)   // rob flush MemBlock
  val issueLda = MixedVec(Seq.fill(params.LduCnt)(DecoupledIO(new MemExuInput())))
  val issueSta = MixedVec(Seq.fill(params.StaCnt)(DecoupledIO(new MemExuInput())))
  val issueStd = MixedVec(Seq.fill(params.StdCnt)(DecoupledIO(new MemExuInput())))
  val issueHylda = MixedVec(Seq.fill(params.HyuCnt)(DecoupledIO(new MemExuInput())))
  val issueHysta = MixedVec(Seq.fill(params.HyuCnt)(DecoupledIO(new MemExuInput())))
  val issueVldu = MixedVec(Seq.fill(params.VlduCnt)(DecoupledIO(new MemExuInput(true))))

  val loadFastMatch = Vec(params.LduCnt, Output(UInt(params.LduCnt.W)))
  val loadFastImm   = Vec(params.LduCnt, Output(UInt(12.W))) // Imm_I

  val tlbCsr = Output(new TlbCsrBundle)
  val csrCtrl = Output(new CustomCSRCtrlIO)
  val sfence = Output(new SfenceBundle)
  val isStoreException = Output(Bool())
  val isVlsException = Output(Bool())

  // ATTENTION: The order of the issue ports must be the same as the IQs' deq config
  private [backend] def issueUops: Seq[DecoupledIO[MemExuInput]] = {
    issueSta ++
      issueHylda ++ issueHysta ++
      issueLda ++
      issueVldu ++
      issueStd
  }.toSeq

  // ATTENTION: The order of the writeback ports must be the same as the IQs' deq config
  private [backend] def writeBack: Seq[DecoupledIO[MemExuOutput]] = {
    writebackSta ++
      writebackHyuLda ++ writebackHyuSta ++
      writebackLda ++
      writebackVldu ++
      writebackStd
  }
}

class BackendIO(implicit p: Parameters, params: BackendParams) extends XSBundle {
  val fromTop = new Bundle {
    val hartId = Input(UInt(8.W))
    val externalInterrupt = new ExternalInterruptIO
  }

  val toTop = new Bundle {
    val cpuHalted = Output(Bool())
  }

  val fenceio = new FenceIO
  // Todo: merge these bundles into BackendFrontendIO
  val frontend = Flipped(new FrontendToCtrlIO)
  val frontendSfence = Output(new SfenceBundle)
  val frontendCsrCtrl = Output(new CustomCSRCtrlIO)
  val frontendTlbCsr = Output(new TlbCsrBundle)
  // distributed csr write
  val frontendCsrDistributedUpdate = Flipped(new DistributedCSRUpdateReq)

  val mem = new BackendMemIO

  val perf = Input(new PerfCounterIO)

  val tlb = Output(new TlbCsrBundle)

  val csrCustomCtrl = Output(new CustomCSRCtrlIO)

  val debugTopDown = new Bundle {
    val fromRob = new RobCoreTopDownIO
    val fromCore = new CoreDispatchTopDownIO
  }
  val debugRolling = new RobDebugRollingIO
}