xref: /XiangShan/src/main/scala/xiangshan/backend/issue/Scheduler.scala (revision 0a7d1d5cc74078a0d2fe9270a78ac80db6cb1ad0)
1package xiangshan.backend.issue
2
3import org.chipsalliance.cde.config.Parameters
4import chisel3._
5import chisel3.util._
6import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
7import utility.HasPerfEvents
8import utils.OptionWrapper
9import xiangshan._
10import xiangshan.backend.Bundles._
11import xiangshan.backend.datapath.DataConfig._
12import xiangshan.backend.datapath.WbConfig._
13import xiangshan.backend.fu.FuType
14import xiangshan.backend.regfile.RfWritePortWithConfig
15import xiangshan.mem.{LsqEnqCtrl, LsqEnqIO, MemWaitUpdateReq, SqPtr, LqPtr}
16import xiangshan.backend.datapath.WbConfig.V0WB
17import xiangshan.backend.regfile.VlPregParams
18import xiangshan.backend.regcache.RegCacheTagTable
19
/** Tag type distinguishing the scheduler variants instantiated by `Scheduler`.
  *
  * Sealed so that `match`es over the variants are checked for exhaustiveness.
  * Parameterless case classes (rather than case objects) are kept so existing
  * `case IntScheduler() =>` patterns and `IntScheduler()` call sites continue
  * to work unchanged; they are marked `final` per Scala ADT convention so no
  * further subclassing can weaken the sealed hierarchy.
  */
sealed trait SchedulerType

/** Integer-pipeline scheduler. */
final case class IntScheduler() extends SchedulerType
/** Floating-point scheduler. */
final case class FpScheduler() extends SchedulerType
/** Memory (load/store) scheduler. */
final case class MemScheduler() extends SchedulerType
/** Vector / vector-FP scheduler. */
final case class VfScheduler() extends SchedulerType
/** Placeholder meaning "no scheduler". */
final case class NoScheduler() extends SchedulerType
27
/** Diplomacy wrapper for one scheduler block.
  *
  * Instantiates one [[IssueQueue]] per entry of `params.issueBlockParams` and
  * selects the concrete implementation (arithmetic or memory) from
  * `params.schdType`.
  *
  * @param params static configuration of this scheduler block
  */
class Scheduler(val params: SchdBlockParams)(implicit p: Parameters) extends LazyModule with HasXSParameter {
  override def shouldBeInlined: Boolean = false

  // Number of physical-register writeback ports per register-file kind.
  val numIntStateWrite = backendParams.numPregWb(IntData())
  val numFpStateWrite = backendParams.numPregWb(FpData())
  val numVfStateWrite = backendParams.numPregWb(VecData())
  val numV0StateWrite = backendParams.numPregWb(V0Data())
  val numVlStateWrite = backendParams.numPregWb(VlData())

  // One issue queue per issue block, named after its configuration.
  val issueQueue = params.issueBlockParams.map(x => LazyModule(new IssueQueue(x).suggestName(x.getIQName)))

  lazy val module: SchedulerImpBase = params.schdType match {
    // Int/Fp/Vf schedulers share the same arithmetic implementation.
    case IntScheduler() | FpScheduler() | VfScheduler() => new SchedulerArithImp(this)(params, p)
    case MemScheduler() => new SchedulerMemImp(this)(params, p)
    // Fail fast on an unsupported type instead of returning null, which would
    // only surface later as an obscure NullPointerException at elaboration.
    case unknown => throw new IllegalArgumentException(s"Scheduler: unsupported schdType $unknown")
  }
}
47
// IO bundle of a scheduler block: dispatch enqueue ports, regfile-writeback
// ports (reused as wakeup sources), inter-scheduler IQ wakeups, datapath
// issue/response channels, and a memory-block interface present only when
// this is the memory scheduler.
48class SchedulerIO()(implicit params: SchdBlockParams, p: Parameters) extends XSBundle {
49  // params alias
50  private val LoadQueueSize = VirtualLoadQueueSize
   // Std IQs contribute no dispatch ports of their own: their enqueue is
   // derived from the matching sta/hyu enqueue (see SchedulerMemImp).
51  val fromDispatchUopNum = params.issueBlockParams.filter(x => x.StdCnt == 0).map(_.numEnq).sum
52  val allIssueParams = params.issueBlockParams.filter(_.StdCnt == 0)
53  val IssueQueueDeqSum = allIssueParams.map(_.numDeq).sum
54  val maxIQSize = allIssueParams.map(_.numEntries).max
55  val fromTop = new Bundle {
56    val hartId = Input(UInt(8.W))
57  }
   // Per-IQ read/write views of the writeback FU busy table.
58  val fromWbFuBusyTable = new Bundle{
59    val fuBusyTableRead = MixedVec(params.issueBlockParams.map(x => Input(x.genWbFuBusyTableReadBundle)))
60  }
61  val wbFuBusyTable = MixedVec(params.issueBlockParams.map(x => Output(x.genWbFuBusyTableWriteBundle)))
   // Valid-entry counts per (non-std) deq port. NOTE(review): width is taken
   // from `(maxIQSize).U.getWidth`, i.e. the minimal width of the literal;
   // `log2Up(maxIQSize + 1)` would state the intent more directly — verify
   // they agree for all configurations.
62  val IQValidNumVec = Output(Vec(IssueQueueDeqSum, UInt((maxIQSize).U.getWidth.W)))
63
64  val fromCtrlBlock = new Bundle {
65    val flush = Flipped(ValidIO(new Redirect))
66  }
   // Dispatch-stage uops entering this scheduler (one port per IQ enq, std excluded).
67  val fromDispatch = new Bundle {
68    val uops =  Vec(fromDispatchUopNum, Flipped(DecoupledIO(new DynInst)))
69  }
   // Writeback ports per register-file kind; SchedulerImpBase repacks these
   // into wakeup bundles for the issue queues.
70  val intWriteBack = MixedVec(Vec(backendParams.numPregWb(IntData()),
71    new RfWritePortWithConfig(backendParams.intPregParams.dataCfg, backendParams.intPregParams.addrWidth)))
72  val fpWriteBack = MixedVec(Vec(backendParams.numPregWb(FpData()),
73    new RfWritePortWithConfig(backendParams.fpPregParams.dataCfg, backendParams.fpPregParams.addrWidth)))
74  val vfWriteBack = MixedVec(Vec(backendParams.numPregWb(VecData()),
75    new RfWritePortWithConfig(backendParams.vfPregParams.dataCfg, backendParams.vfPregParams.addrWidth)))
76  val v0WriteBack = MixedVec(Vec(backendParams.numPregWb(V0Data()),
77    new RfWritePortWithConfig(backendParams.v0PregParams.dataCfg, backendParams.v0PregParams.addrWidth)))
78  val vlWriteBack = MixedVec(Vec(backendParams.numPregWb(VlData()),
79    new RfWritePortWithConfig(backendParams.vlPregParams.dataCfg, backendParams.vlPregParams.addrWidth)))
   // Issue bundles to the datapath (one inner vec per issue block).
80  val toDataPathAfterDelay: MixedVec[MixedVec[DecoupledIO[IssueQueueIssueBundle]]] = MixedVec(params.issueBlockParams.map(_.genIssueDecoupledBundle))
81
   // Summary flags about the vl value being written back, forwarded to each IQ.
82  val vlWriteBackInfo = new Bundle {
83    val vlFromIntIsZero  = Input(Bool())
84    val vlFromIntIsVlmax = Input(Bool())
85    val vlFromVfIsZero   = Input(Bool())
86    val vlFromVfIsVlmax  = Input(Bool())
87  }
88
   // IQ-to-IQ wakeups arriving from other scheduler blocks.
89  val fromSchedulers = new Bundle {
90    val wakeupVec: MixedVec[ValidIO[IssueQueueIQWakeUpBundle]] = Flipped(params.genIQWakeUpInValidBundle)
91  }
92
   // IQ-to-IQ wakeups broadcast by this scheduler block.
93  val toSchedulers = new Bundle {
94    val wakeupVec: MixedVec[ValidIO[IssueQueueIQWakeUpBundle]] = params.genIQWakeUpOutValidBundle
95  }
96
97  val fromDataPath = new Bundle {
     // Per-IQ, per-deq-port OG0/OG1 responses from the datapath.
98    val resp: MixedVec[MixedVec[OGRespBundle]] = MixedVec(params.issueBlockParams.map(x => Flipped(x.genOGRespBundle)))
99    val og0Cancel = Input(ExuVec())
100    // Todo: remove this after no cancel signal from og1
101    val og1Cancel = Input(ExuVec())
102    // replace RCIdx to Wakeup Queue
103    val replaceRCIdx = OptionWrapper(params.needWriteRegCache, Vec(params.numWriteRegCache, Input(UInt(RegCacheIdxWidth.W))))
104    // just be compatible to old code
105    def apply(i: Int)(j: Int) = resp(i)(j)
106  }
107
   // Late issue responses for (vector) loads; sized per IQ by LdExuCnt/VlduCnt.
108  val loadFinalIssueResp = MixedVec(params.issueBlockParams.map(x => MixedVec(Vec(x.LdExuCnt, Flipped(ValidIO(new IssueQueueDeqRespBundle()(p, x)))))))
109  val vecLoadFinalIssueResp = MixedVec(params.issueBlockParams.map(x => MixedVec(Vec(x.VlduCnt, Flipped(ValidIO(new IssueQueueDeqRespBundle()(p, x)))))))
110  val memAddrIssueResp = MixedVec(params.issueBlockParams.map(x => MixedVec(Vec(x.LdExuCnt, Flipped(ValidIO(new IssueQueueDeqRespBundle()(p, x)))))))
111  val vecLoadIssueResp = MixedVec(params.issueBlockParams.map(x => MixedVec(Vec(x.VlduCnt, Flipped(ValidIO(new IssueQueueDeqRespBundle()(p, x)))))))
112
   // One cancel channel per load-capable unit (pure loads + hybrid units).
113  val ldCancel = Vec(backendParams.LduCnt + backendParams.HyuCnt, Flipped(new LoadCancelIO))
114
   // Memory-block interface: only elaborated for the memory scheduler.
115  val fromMem = if (params.isMemSchd) Some(new Bundle {
116    val ldaFeedback = Flipped(Vec(params.LduCnt, new MemRSFeedbackIO))
117    val staFeedback = Flipped(Vec(params.StaCnt, new MemRSFeedbackIO))
118    val hyuFeedback = Flipped(Vec(params.HyuCnt, new MemRSFeedbackIO))
119    val vstuFeedback = Flipped(Vec(params.VstuCnt, new MemRSFeedbackIO(isVector = true)))
120    val vlduFeedback = Flipped(Vec(params.VlduCnt, new MemRSFeedbackIO(isVector = true)))
121    val stIssuePtr = Input(new SqPtr())
122    val lcommit = Input(UInt(log2Up(CommitWidth + 1).W))
123    val scommit = Input(UInt(log2Ceil(EnsbufferWidth + 1).W)) // connected to `memBlock.io.sqDeq` instead of ROB
124    val wakeup = Vec(params.LdExuCnt, Flipped(Valid(new DynInst)))
125    val lqDeqPtr = Input(new LqPtr)
126    val sqDeqPtr = Input(new SqPtr)
127    // from lsq
128    val lqCancelCnt = Input(UInt(log2Up(LoadQueueSize + 1).W))
129    val sqCancelCnt = Input(UInt(log2Up(StoreQueueSize + 1).W))
130    val memWaitUpdateReq = Flipped(new MemWaitUpdateReq)
131  }) else None
132  val toMem = if (params.isMemSchd) Some(new Bundle {
133    val loadFastMatch = Output(Vec(params.LduCnt, new IssueQueueLoadBundle))
134  }) else None
   // OG2 responses, only for issue blocks that need them (e.g. vector).
135  val fromOg2Resp = if(params.needOg2Resp) Some(MixedVec(params.issueBlockParams.filter(_.needOg2Resp).map(x => Flipped(x.genOG2RespBundle)))) else None
136}
137
// Common implementation shared by all scheduler variants: instantiates the
// issue queues, repacks the regfile writeback ports into wakeup bundles,
// wires inter-IQ wakeups, datapath responses, cancel signals and perf events.
// Subclasses (arith/mem) add dispatch enqueue and block-specific wiring.
138abstract class SchedulerImpBase(wrapper: Scheduler)(implicit params: SchdBlockParams, p: Parameters)
139  extends LazyModuleImp(wrapper)
140    with HasXSParameter
141{
142  val io = IO(new SchedulerIO())
143
144  // alias
   // exuIdx -> incoming inter-IQ wakeup channel.
145  private val iqWakeUpInMap: Map[Int, ValidIO[IssueQueueIQWakeUpBundle]] =
146    io.fromSchedulers.wakeupVec.map(x => (x.bits.exuIdx, x)).toMap
147  private val schdType = params.schdType
148
149  // Modules
150  val issueQueues: Seq[IssueQueueImp] = wrapper.issueQueue.map(_.module)
151
   // Export per-deq valid-entry counts; std IQs are excluded to match the IO width.
152  io.IQValidNumVec := issueQueues.filter(_.params.StdCnt == 0).map(_.io.validCntDeqVec).flatten
   // Wakeup wires derived from each register file's writeback ports.
153  val wakeupFromIntWBVec = Wire(params.genIntWBWakeUpSinkValidBundle)
154  val wakeupFromFpWBVec = Wire(params.genFpWBWakeUpSinkValidBundle)
155  val wakeupFromVfWBVec = Wire(params.genVfWBWakeUpSinkValidBundle)
156  val wakeupFromV0WBVec = Wire(params.genV0WBWakeUpSinkValidBundle)
157  val wakeupFromVlWBVec = Wire(params.genVlWBWakeUpSinkValidBundle)
158
   // Repack each int writeback port into a wakeup: valid := wen, plus the
   // per-regfile wen flags and destination preg. Same pattern for the four
   // other register-file kinds below.
159  wakeupFromIntWBVec.zip(io.intWriteBack).foreach { case (sink, source) =>
160    sink.valid := source.wen
161    sink.bits.rfWen := source.intWen
162    sink.bits.fpWen := source.fpWen
163    sink.bits.vecWen := source.vecWen
164    sink.bits.v0Wen := source.v0Wen
165    sink.bits.vlWen := source.vlWen
166    sink.bits.pdest := source.addr
167  }
168
   // fp writeback -> wakeup.
169  wakeupFromFpWBVec.zip(io.fpWriteBack).foreach { case (sink, source) =>
170    sink.valid := source.wen
171    sink.bits.rfWen := source.intWen
172    sink.bits.fpWen := source.fpWen
173    sink.bits.vecWen := source.vecWen
174    sink.bits.v0Wen := source.v0Wen
175    sink.bits.vlWen := source.vlWen
176    sink.bits.pdest := source.addr
177  }
178
   // vf writeback -> wakeup.
179  wakeupFromVfWBVec.zip(io.vfWriteBack).foreach { case (sink, source) =>
180    sink.valid := source.wen
181    sink.bits.rfWen := source.intWen
182    sink.bits.fpWen := source.fpWen
183    sink.bits.vecWen := source.vecWen
184    sink.bits.v0Wen := source.v0Wen
185    sink.bits.vlWen := source.vlWen
186    sink.bits.pdest := source.addr
187  }
188
   // v0 writeback -> wakeup.
189  wakeupFromV0WBVec.zip(io.v0WriteBack).foreach { case (sink, source) =>
190    sink.valid := source.wen
191    sink.bits.rfWen := source.intWen
192    sink.bits.fpWen := source.fpWen
193    sink.bits.vecWen := source.vecWen
194    sink.bits.v0Wen := source.v0Wen
195    sink.bits.vlWen := source.vlWen
196    sink.bits.pdest := source.addr
197  }
198
   // vl writeback -> wakeup.
199  wakeupFromVlWBVec.zip(io.vlWriteBack).foreach { case (sink, source) =>
200    sink.valid := source.wen
201    sink.bits.rfWen := source.intWen
202    sink.bits.fpWen := source.fpWen
203    sink.bits.vecWen := source.vecWen
204    sink.bits.v0Wen := source.v0Wen
205    sink.bits.vlWen := source.vlWen
206    sink.bits.pdest := source.addr
207  }
208
209  // Connect bundles having the same wakeup source
210  issueQueues.zipWithIndex.foreach { case(iq, i) =>
211    iq.io.wakeupFromIQ.foreach { wakeUp =>
212      val wakeUpIn = iqWakeUpInMap(wakeUp.bits.exuIdx)
213      val exuIdx = wakeUp.bits.exuIdx
214      println(s"[Backend] Connect wakeup exuIdx ${exuIdx}")
215      connectSamePort(wakeUp,wakeUpIn)
216      backendParams.connectWakeup(exuIdx)
       // For exus with duplicated pdest/wen copies, override the base
       // connection with this IQ's dedicated copy. Relies on Chisel
       // last-connect semantics, so order after connectSamePort matters.
217      if (backendParams.isCopyPdest(exuIdx)) {
218        println(s"[Backend] exuIdx ${exuIdx} use pdestCopy ${backendParams.getCopyPdestIndex(exuIdx)}")
219        wakeUp.bits.pdest := wakeUpIn.bits.pdestCopy.get(backendParams.getCopyPdestIndex(exuIdx))
220        if (wakeUpIn.bits.rfWenCopy.nonEmpty) wakeUp.bits.rfWen := wakeUpIn.bits.rfWenCopy.get(backendParams.getCopyPdestIndex(exuIdx))
221        if (wakeUpIn.bits.fpWenCopy.nonEmpty) wakeUp.bits.fpWen := wakeUpIn.bits.fpWenCopy.get(backendParams.getCopyPdestIndex(exuIdx))
222        if (wakeUpIn.bits.vecWenCopy.nonEmpty) wakeUp.bits.vecWen := wakeUpIn.bits.vecWenCopy.get(backendParams.getCopyPdestIndex(exuIdx))
223        if (wakeUpIn.bits.v0WenCopy.nonEmpty) wakeUp.bits.v0Wen := wakeUpIn.bits.v0WenCopy.get(backendParams.getCopyPdestIndex(exuIdx))
224        if (wakeUpIn.bits.vlWenCopy.nonEmpty) wakeUp.bits.vlWen := wakeUpIn.bits.vlWenCopy.get(backendParams.getCopyPdestIndex(exuIdx))
225        if (wakeUpIn.bits.loadDependencyCopy.nonEmpty) wakeUp.bits.loadDependency := wakeUpIn.bits.loadDependencyCopy.get(backendParams.getCopyPdestIndex(exuIdx))
226      }
       // Mask wen kinds this IQ has no source operands for (again last-connect).
227      if (iq.params.numIntSrc == 0) wakeUp.bits.rfWen := false.B
228      if (iq.params.numFpSrc == 0)  wakeUp.bits.fpWen := false.B
229      if (iq.params.numVfSrc == 0)  wakeUp.bits.vecWen := false.B
230      if (iq.params.numV0Src == 0)  wakeUp.bits.v0Wen := false.B
231      if (iq.params.numVlSrc == 0)  wakeUp.bits.vlWen := false.B
232    }
233    iq.io.og0Cancel := io.fromDataPath.og0Cancel
234    iq.io.og1Cancel := io.fromDataPath.og1Cancel
235    if (iq.params.needLoadDependency)
236      iq.io.ldCancel := io.ldCancel
237    else
238      iq.io.ldCancel := 0.U.asTypeOf(io.ldCancel)
239  }
240
241  // connect the vl writeback information to the issue queues
242  issueQueues.zipWithIndex.foreach { case(iq, i) =>
243    iq.io.vlFromIntIsVlmax := io.vlWriteBackInfo.vlFromIntIsVlmax
244    iq.io.vlFromIntIsZero := io.vlWriteBackInfo.vlFromIntIsZero
245    iq.io.vlFromVfIsVlmax := io.vlWriteBackInfo.vlFromVfIsVlmax
246    iq.io.vlFromVfIsZero := io.vlWriteBackInfo.vlFromVfIsZero
247  }
248
   // exuIdx -> outgoing inter-IQ wakeup channel produced by our own IQs.
249  private val iqWakeUpOutMap: Map[Int, ValidIO[IssueQueueIQWakeUpBundle]] =
250    issueQueues.flatMap(_.io.wakeupToIQ)
251      .map(x => (x.bits.exuIdx, x))
252      .toMap
253
254  // Connect bundles having the same wakeup source
255  io.toSchedulers.wakeupVec.foreach { wakeUp =>
256    wakeUp := iqWakeUpOutMap(wakeUp.bits.exuIdx)
257  }
258
   // Delayed deq ports of each IQ drive the datapath issue channels.
259  io.toDataPathAfterDelay.zipWithIndex.foreach { case (toDpDy, i) =>
260    toDpDy <> issueQueues(i).io.deqDelay
261  }
262
263  // Response
264  issueQueues.zipWithIndex.foreach { case (iq, i) =>
265    iq.io.og0Resp.zipWithIndex.foreach { case (og0Resp, j) =>
266      og0Resp := io.fromDataPath(i)(j).og0resp
267    }
268    iq.io.og1Resp.zipWithIndex.foreach { case (og1Resp, j) =>
269      og1Resp := io.fromDataPath(i)(j).og1resp
270    }
     // Final issue responses only exist for (vector) load IQs; everything
     // else gets a constant-zero response.
271    iq.io.finalIssueResp.foreach(_.zipWithIndex.foreach { case (finalIssueResp, j) =>
272      if (io.loadFinalIssueResp(i).isDefinedAt(j) && iq.params.isLdAddrIQ) {
273        finalIssueResp := io.loadFinalIssueResp(i)(j)
274      } else if (io.vecLoadFinalIssueResp(i).isDefinedAt(j) && iq.params.isVecLduIQ) {
275        finalIssueResp := io.vecLoadFinalIssueResp(i)(j)
276      }
277      else {
278        finalIssueResp := 0.U.asTypeOf(finalIssueResp)
279      }
280    })
281    iq.io.memAddrIssueResp.foreach(_.zipWithIndex.foreach { case (memAddrIssueResp, j) =>
282      if (io.memAddrIssueResp(i).isDefinedAt(j)) {
283        memAddrIssueResp := io.memAddrIssueResp(i)(j)
284      } else {
285        memAddrIssueResp := 0.U.asTypeOf(memAddrIssueResp)
286      }
287    })
288    iq.io.vecLoadIssueResp.foreach(_.zipWithIndex.foreach { case (resp, deqIdx) =>
289      resp := io.vecLoadIssueResp(i)(deqIdx)
290    })
291    iq.io.wbBusyTableRead := io.fromWbFuBusyTable.fuBusyTableRead(i)
292    io.wbFuBusyTable(i) := iq.io.wbBusyTableWrite
     // Default: tie off; overridden below (last-connect) when the scheduler
     // actually writes the register cache.
293    iq.io.replaceRCIdx.foreach(x => x := 0.U.asTypeOf(x))
294  }
295  if (params.needOg2Resp) {
296    issueQueues.filter(_.params.needOg2Resp).zip(io.fromOg2Resp.get).foreach{ case (iq, og2RespVec) =>
297      iq.io.og2Resp.get.zip(og2RespVec).foreach{ case (iqOg2Resp, og2Resp) =>
298        iqOg2Resp := og2Resp
299      }
300    }
301  }
302
303  // Connect each replace RCIdx to IQ
304  if (params.needWriteRegCache) {
     // Keep only the replaceRCIdx ports of exus that write the register cache,
     // in a flat order matching io.fromDataPath.replaceRCIdx.
305    val iqReplaceRCIdxVec = issueQueues.filter(_.params.needWriteRegCache).flatMap{ iq =>
306      iq.params.allExuParams.zip(iq.io.replaceRCIdx.get).filter(_._1.needWriteRegCache).map(_._2)
307    }
308    iqReplaceRCIdxVec.zip(io.fromDataPath.replaceRCIdx.get).foreach{ case (iq, in) =>
309      iq := in
310    }
311
312    println(s"[Scheduler] numWriteRegCache: ${params.numWriteRegCache}")
313    println(s"[Scheduler] iqReplaceRCIdxVec: ${iqReplaceRCIdxVec.size}")
314  }
315
316  // perfEvent
317  val lastCycleIqEnqFireVec    = RegNext(VecInit(issueQueues.map(_.io.enq.map(_.fire)).flatten))
   // NOTE(review): this registers enq.head.ready — i.e. "can accept", not
   // "full" — yet it feeds the `_full` perf counters below; the label looks
   // inverted. Verify against how these counters are consumed.
318  val lastCycleIqFullVec       = RegNext(VecInit(issueQueues.map(_.io.enq.head.ready)))
319
320  val issueQueueFullVecPerf = issueQueues.zip(lastCycleIqFullVec)map{ case (iq, full) => (iq.params.getIQName + s"_full", full) }
321  val basePerfEvents = Seq(
322    ("issueQueue_enq_fire_cnt",  PopCount(lastCycleIqEnqFireVec)                    )
323  )  ++ issueQueueFullVecPerf
324
   // Elaboration-time logging of the wakeup topology.
325  println(s"[Scheduler] io.fromSchedulers.wakeupVec: ${io.fromSchedulers.wakeupVec.map(x => backendParams.getExuName(x.bits.exuIdx))}")
326  println(s"[Scheduler] iqWakeUpInKeys: ${iqWakeUpInMap.keys}")
327
328  println(s"[Scheduler] iqWakeUpOutKeys: ${iqWakeUpOutMap.keys}")
329  println(s"[Scheduler] io.toSchedulers.wakeupVec: ${io.toSchedulers.wakeupVec.map(x => backendParams.getExuName(x.bits.exuIdx))}")
330}
331
/** Scheduler implementation for the arithmetic blocks (int / fp / vector).
  *
  * Wires dispatch uops straight into the issue queues and connects each IQ to
  * the writeback wakeup ports selected by its `needWakeupFrom*WBPort` maps.
  */
class SchedulerArithImp(override val wrapper: Scheduler)(implicit params: SchdBlockParams, p: Parameters)
  extends SchedulerImpBase(wrapper)
    with HasXSParameter
    with HasPerfEvents
{
  // All IQ enqueue ports, flattened in the same order as io.fromDispatch.uops.
  val issueQueuesUopIn = issueQueues.flatMap(_.io.enq)
  issueQueuesUopIn.zip(io.fromDispatch.uops).foreach { case (enq, uop) => enq <> uop }

  issueQueues.zipWithIndex.foreach { case (iq, i) =>
    iq.io.flush <> io.fromCtrlBlock.flush
    // IQs that never track load dependencies get a constant-zero dependency vector.
    if (!iq.params.needLoadDependency) {
      iq.io.enq.foreach(x => x.bits.srcLoadDependency := 0.U.asTypeOf(x.bits.srcLoadDependency))
    }
    // Select the writeback wakeup ports this IQ listens to, per scheduler kind.
    val wbWakeupSources = params.schdType match {
      case IntScheduler() => wakeupFromIntWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromIntWBPort.keys.toSeq.contains(x._2)).map(_._1)
      case FpScheduler() => wakeupFromFpWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromFpWBPort.keys.toSeq.contains(x._2)).map(_._1)
      case VfScheduler() => (wakeupFromVfWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromVfWBPort.keys.toSeq.contains(x._2)).map(_._1) ++
                             wakeupFromV0WBVec.zipWithIndex.filter(x => iq.params.needWakeupFromV0WBPort.keys.toSeq.contains(x._2)).map(_._1) ++
                             wakeupFromVlWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromVlWBPort.keys.toSeq.contains(x._2)).map(_._1))
      // Fail fast instead of returning null (which previously NPE'd at the
      // zip below): a MemScheduler must use SchedulerMemImp.
      case other => throw new IllegalArgumentException(s"SchedulerArithImp does not support schdType $other")
    }
    iq.io.wakeupFromWB.zip(wbWakeupSources).foreach { case (sink, source) => sink := source }
  }

  val perfEvents = basePerfEvents
  generatePerfEvent()
}
358
359// FIXME: Vector mem instructions may not be handled properly!
// Scheduler implementation for the memory block. On top of the base wiring it
// pairs store-address and store-data issue queues (a store dispatches once but
// enqueues into both), handles hybrid (load+store) units, and connects the
// lsq/memory-block feedback, wakeup and pointer signals.
360class SchedulerMemImp(override val wrapper: Scheduler)(implicit params: SchdBlockParams, p: Parameters)
361  extends SchedulerImpBase(wrapper)
362    with HasXSParameter
363    with HasPerfEvents
364{
365
   // Dispatch ports connect to every non-std IQ; std IQs are fed from the
   // matching sta/hyu enqueue further below.
366  val issueQueuesUopIn = issueQueues.filter(_.params.StdCnt == 0).map(_.io.enq).flatten
367  issueQueuesUopIn.zip(io.fromDispatch.uops).map(x => x._1 <> x._2)
   // Dispatch-port indices (over the non-std ports) that carry store-address uops.
368  val noStdExuParams = params.issueBlockParams.map(x => Seq.fill(x.numEnq)(x.exuBlockParams)).flatten.filter{x => x.map(!_.hasStdFu).reduce(_ && _)}
369  val staIdx = noStdExuParams.zipWithIndex.filter{x => x._1.map(_.hasStoreAddrFu).reduce(_ || _)}.map(_._2)
370  val staReady = issueQueues.filter(iq => iq.params.StaCnt > 0).map(_.io.enq.map(_.ready)).flatten
371  val stdReady = issueQueues.filter(iq => iq.params.StdCnt > 0).map(_.io.enq.map(_.ready)).flatten
   // A store dispatch port is ready only when BOTH the sta and the paired std
   // IQ can accept (overrides the base `<>` connection via last-connect).
372  staIdx.zipWithIndex.map{ case (sta, i) => {
373    io.fromDispatch.uops(sta).ready := staReady(i) && stdReady(i)
374  }}
375  val memAddrIQs = issueQueues.filter(_.params.isMemAddrIQ)
376  val stAddrIQs = issueQueues.filter(iq => iq.params.StaCnt > 0) // included in memAddrIQs
377  val ldAddrIQs = issueQueues.filter(iq => iq.params.LduCnt > 0)
378  val stDataIQs = issueQueues.filter(iq => iq.params.StdCnt > 0)
379  val vecMemIQs = issueQueues.filter(_.params.isVecMemIQ)
380  val (hyuIQs, hyuIQIdxs) = issueQueues.zipWithIndex.filter(_._1.params.HyuCnt > 0).unzip
381
382  println(s"[SchedulerMemImp] memAddrIQs.size: ${memAddrIQs.size}, enq.size: ${memAddrIQs.map(_.io.enq.size).sum}")
383  println(s"[SchedulerMemImp] stAddrIQs.size:  ${stAddrIQs.size }, enq.size: ${stAddrIQs.map(_.io.enq.size).sum}")
384  println(s"[SchedulerMemImp] ldAddrIQs.size:  ${ldAddrIQs.size }, enq.size: ${ldAddrIQs.map(_.io.enq.size).sum}")
385  println(s"[SchedulerMemImp] stDataIQs.size:  ${stDataIQs.size }, enq.size: ${stDataIQs.map(_.io.enq.size).sum}")
386  println(s"[SchedulerMemImp] hyuIQs.size:     ${hyuIQs.size    }, enq.size: ${hyuIQs.map(_.io.enq.size).sum}")
387  require(memAddrIQs.nonEmpty && stDataIQs.nonEmpty)
388
389  io.toMem.get.loadFastMatch := 0.U.asTypeOf(io.toMem.get.loadFastMatch) // TODO: is still needed?
390
   // Forward memory-block load wakeups into each load-capable address IQ.
391  private val loadWakeUp = issueQueues.filter(_.params.LdExuCnt > 0).map(_.asInstanceOf[IssueQueueMemAddrImp].io.memIO.get.loadWakeUp).flatten
392  require(loadWakeUp.length == io.fromMem.get.wakeup.length)
393  loadWakeUp.zip(io.fromMem.get.wakeup).foreach(x => x._1 := x._2)
394
   // Flush, load-dependency tie-off and writeback wakeup selection for every
   // memory address IQ (all five regfile kinds may wake them).
395  memAddrIQs.zipWithIndex.foreach { case (iq, i) =>
396    iq.io.flush <> io.fromCtrlBlock.flush
397    if (!iq.params.needLoadDependency) {
398      iq.io.enq.map(x => x.bits.srcLoadDependency := 0.U.asTypeOf(x.bits.srcLoadDependency))
399    }
400    iq.io.wakeupFromWB.zip(
401      wakeupFromIntWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromIntWBPort.keys.toSeq.contains(x._2)).map(_._1) ++
402      wakeupFromFpWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromFpWBPort.keys.toSeq.contains(x._2)).map(_._1) ++
403      wakeupFromVfWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromVfWBPort.keys.toSeq.contains(x._2)).map(_._1) ++
404      wakeupFromV0WBVec.zipWithIndex.filter(x => iq.params.needWakeupFromV0WBPort.keys.toSeq.contains(x._2)).map(_._1) ++
405      wakeupFromVlWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromVlWBPort.keys.toSeq.contains(x._2)).map(_._1)
406    ).foreach{ case (sink, source) => sink := source}
407  }
408
   // Load-address IQs: feedback tied off; store-wait checking connected.
409  ldAddrIQs.zipWithIndex.foreach {
410    case (imp: IssueQueueMemAddrImp, i) =>
411      imp.io.memIO.get.feedbackIO.head := 0.U.asTypeOf(imp.io.memIO.get.feedbackIO.head)
412      imp.io.memIO.get.checkWait.stIssuePtr := io.fromMem.get.stIssuePtr
413      imp.io.memIO.get.checkWait.memWaitUpdateReq := io.fromMem.get.memWaitUpdateReq
414    case _ =>
415  }
416
   // Store-address IQs: replay feedback comes from the memory block.
417  stAddrIQs.zipWithIndex.foreach {
418    case (imp: IssueQueueMemAddrImp, i) =>
419      imp.io.memIO.get.feedbackIO.head := io.fromMem.get.staFeedback(i)
420      imp.io.memIO.get.checkWait.stIssuePtr := io.fromMem.get.stIssuePtr
421      imp.io.memIO.get.checkWait.memWaitUpdateReq := io.fromMem.get.memWaitUpdateReq
422    case _ =>
423  }
424
   // Hybrid (load+store) IQs: feedback on port 0 only; the second delayed deq
   // port is force-disabled toward the datapath.
425  hyuIQs.zip(hyuIQIdxs).foreach {
426    case (imp: IssueQueueMemAddrImp, idx) =>
427      imp.io.memIO.get.feedbackIO.head := io.fromMem.get.hyuFeedback.head
428      imp.io.memIO.get.feedbackIO(1) := 0.U.asTypeOf(imp.io.memIO.get.feedbackIO(1))
429      imp.io.memIO.get.checkWait.stIssuePtr := io.fromMem.get.stIssuePtr
430      imp.io.memIO.get.checkWait.memWaitUpdateReq := io.fromMem.get.memWaitUpdateReq
431      // TODO: refactor dirty code
432      imp.io.deqDelay(1).ready := false.B
433      io.toDataPathAfterDelay(idx)(1).valid := false.B
434      io.toDataPathAfterDelay(idx)(1).bits := 0.U.asTypeOf(io.toDataPathAfterDelay(idx)(1).bits)
435    case _ =>
436  }
437
438  private val staIdxSeq = (stAddrIQs).map(iq => iq.params.idxInSchBlk)
439  private val hyaIdxSeq = (hyuIQs).map(iq => iq.params.idxInSchBlk)
440
441  println(s"[SchedulerMemImp] sta iq idx in memSchdBlock: $staIdxSeq")
442  println(s"[SchedulerMemImp] hya iq idx in memSchdBlock: $hyaIdxSeq")
443
   // Pair each sta/hya enq port with an std enq port: the std IQ's first
   // staEnqs.size ports back the store-address IQs, the rest back the hybrids.
444  private val staEnqs = stAddrIQs.map(_.io.enq).flatten
445  private val stdEnqs = stDataIQs.map(_.io.enq).flatten.take(staEnqs.size)
446  private val hyaEnqs = hyuIQs.map(_.io.enq).flatten
447  private val hydEnqs = stDataIQs.map(_.io.enq).flatten.drop(staEnqs.size)
448
449  require(staEnqs.size == stdEnqs.size, s"number of enq ports of store address IQs(${staEnqs.size}) " +
450  s"should be equal to number of enq ports of store data IQs(${stdEnqs.size})")
451
452  require(hyaEnqs.size == hydEnqs.size, s"number of enq ports of hybrid address IQs(${hyaEnqs.size}) " +
453  s"should be equal to number of enq ports of hybrid data IQs(${hydEnqs.size})")
454
   // Store-data IQs: flush and writeback wakeups (no dispatch port of their own).
455  stDataIQs.zipWithIndex.foreach { case (iq, i) =>
456    iq.io.flush <> io.fromCtrlBlock.flush
457    iq.io.wakeupFromWB.zip(
458      wakeupFromIntWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromIntWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
459      wakeupFromFpWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromFpWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
460      wakeupFromVfWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromVfWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
461      wakeupFromV0WBVec.zipWithIndex.filter(x => iq.params.needWakeupFromV0WBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
462      wakeupFromVlWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromVlWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq
463    ).foreach{ case (sink, source) => sink := source}
464    // here disable fp load fast wakeup to std, and no FEX wakeup to std
465    iq.io.wakeupFromIQ.map(_.bits.fpWen := false.B)
466  }
467
   // Derive each std/hyd enqueue from its paired sta/hya enqueue: same uop,
   // but only for real stores/atomics, with src(1) of the address uop remapped
   // to src(0) of the data uop.
468  (stdEnqs ++ hydEnqs).zip(staEnqs ++ hyaEnqs).zipWithIndex.foreach { case ((stdIQEnq, staIQEnq), i) =>
469    stdIQEnq.valid := staIQEnq.valid && FuType.FuTypeOrR(staIQEnq.bits.fuType, FuType.stu, FuType.mou)
470    stdIQEnq.bits  := staIQEnq.bits
471    // Store data reuses store addr src(1) in dispatch2iq
472    // [dispatch2iq] --src*------src*(0)--> [staIQ|hyaIQ]
473    //                       \
474    //                        ---src*(1)--> [stdIQ]
475    // Since the src(1) of sta is easier to get, stdIQEnq.bits.src*(0) is assigned to staIQEnq.bits.src*(1)
476    // instead of dispatch2Iq.io.out(x).bits.src*(1)
477    val stdIdx = 1
478    stdIQEnq.bits.srcState(0) := staIQEnq.bits.srcState(stdIdx)
479    stdIQEnq.bits.srcLoadDependency(0) := staIQEnq.bits.srcLoadDependency(stdIdx)
480    stdIQEnq.bits.srcType(0) := staIQEnq.bits.srcType(stdIdx)
481    stdIQEnq.bits.psrc(0) := staIQEnq.bits.psrc(stdIdx)
482    stdIQEnq.bits.sqIdx := staIQEnq.bits.sqIdx
483    stdIQEnq.bits.useRegCache(0) := staIQEnq.bits.useRegCache(stdIdx)
484    stdIQEnq.bits.regCacheIdx(0) := staIQEnq.bits.regCacheIdx(stdIdx)
485  }
486
   // Vector memory IQs: queue pointers, store-wait checking and WB wakeups.
487  vecMemIQs.foreach {
488    case imp: IssueQueueVecMemImp =>
489      imp.io.memIO.get.sqDeqPtr.foreach(_ := io.fromMem.get.sqDeqPtr)
490      imp.io.memIO.get.lqDeqPtr.foreach(_ := io.fromMem.get.lqDeqPtr)
491      // not used
492      //imp.io.memIO.get.feedbackIO.head := io.fromMem.get.vstuFeedback.head // only vector store replay
493      // maybe not used
494      imp.io.memIO.get.checkWait.stIssuePtr := io.fromMem.get.stIssuePtr
495      imp.io.memIO.get.checkWait.memWaitUpdateReq := io.fromMem.get.memWaitUpdateReq
496      imp.io.wakeupFromWB.zip(
497        wakeupFromIntWBVec.zipWithIndex.filter(x => imp.params.needWakeupFromIntWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
498        wakeupFromFpWBVec.zipWithIndex.filter(x => imp.params.needWakeupFromFpWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
499        wakeupFromVfWBVec.zipWithIndex.filter(x => imp.params.needWakeupFromVfWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
500        wakeupFromV0WBVec.zipWithIndex.filter(x => imp.params.needWakeupFromV0WBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
501        wakeupFromVlWBVec.zipWithIndex.filter(x => imp.params.needWakeupFromVlWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq
502      ).foreach{ case (sink, source) => sink := source}
503
504    case _ =>
505  }
   // Vector-store feedback channels, flattened across all vector memory IQs.
506  val vecMemFeedbackIO: Seq[MemRSFeedbackIO] = vecMemIQs.map {
507    case imp: IssueQueueVecMemImp =>
508      imp.io.memIO.get.feedbackIO
509  }.flatten
510  assert(vecMemFeedbackIO.size == io.fromMem.get.vstuFeedback.size, "vecMemFeedback size dont match!")
511  vecMemFeedbackIO.zip(io.fromMem.get.vstuFeedback).foreach{
512    case (sink, source) =>
513      sink := source
514  }
515
516  val perfEvents = basePerfEvents
517  generatePerfEvent()
518}
519