xref: /XiangShan/src/main/scala/xiangshan/backend/issue/IssueQueue.scala (revision aa2bcc3199f9e6b199af20fda352a22f9a67c044)
1package xiangshan.backend.issue
2
3import org.chipsalliance.cde.config.Parameters
4import chisel3._
5import chisel3.util._
6import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
7import utility.{GTimer, HasCircularQueuePtrHelper, SelectOne}
8import utils._
9import xiangshan._
10import xiangshan.backend.Bundles._
11import xiangshan.backend.issue.EntryBundles._
12import xiangshan.backend.decode.{ImmUnion, Imm_LUI_LOAD}
13import xiangshan.backend.datapath.DataConfig._
14import xiangshan.backend.datapath.DataSource
15import xiangshan.backend.fu.{FuConfig, FuType}
16import xiangshan.mem.{MemWaitUpdateReq, SqPtr, LqPtr}
17import xiangshan.backend.rob.RobPtr
18import xiangshan.backend.datapath.NewPipelineConnect
19
20class IssueQueue(params: IssueBlockParams)(implicit p: Parameters) extends LazyModule with HasXSParameter {
21  override def shouldBeInlined: Boolean = false
22
23  implicit val iqParams = params
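  // Select the concrete implementation from the scheduler type: int and vf schedulers get
  // their own imps; for the memory scheduler, pure-address IQs get IssueQueueMemAddrImp,
  // vector-memory IQs get IssueQueueVecMemImp, and std-bearing IQs reuse IssueQueueIntImp.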
24  lazy val module: IssueQueueImp = iqParams.schdType match {
25    case IntScheduler() => new IssueQueueIntImp(this)
26    case VfScheduler() => new IssueQueueVfImp(this)
27    case MemScheduler() =>
28      if (iqParams.StdCnt == 0 && !iqParams.isVecMemIQ) new IssueQueueMemAddrImp(this)
29      else if (iqParams.isVecMemIQ) new IssueQueueVecMemImp(this)
30      else new IssueQueueIntImp(this)
31    case _ => null
32  }
33}
34
35class IssueQueueStatusBundle(numEnq: Int, numEntries: Int) extends Bundle {
36  val empty = Output(Bool())
37  val full = Output(Bool())
38  val validCnt = Output(UInt(log2Ceil(numEntries).W))
39  val leftVec = Output(Vec(numEnq + 1, Bool()))
40}
41
42class IssueQueueDeqRespBundle(implicit p:Parameters, params: IssueBlockParams) extends EntryDeqRespBundle
43
44class IssueQueueIO()(implicit p: Parameters, params: IssueBlockParams) extends XSBundle {
45  // Inputs
46  val flush = Flipped(ValidIO(new Redirect))
47  val enq = Vec(params.numEnq, Flipped(DecoupledIO(new DynInst)))
48
49  val og0Resp = Vec(params.numDeq, Flipped(ValidIO(new IssueQueueDeqRespBundle)))
50  val og1Resp = Vec(params.numDeq, Flipped(ValidIO(new IssueQueueDeqRespBundle)))
51  val finalIssueResp = OptionWrapper(params.LdExuCnt > 0, Vec(params.numDeq, Flipped(ValidIO(new IssueQueueDeqRespBundle))))
52  val memAddrIssueResp = OptionWrapper(params.LdExuCnt > 0, Vec(params.numDeq, Flipped(ValidIO(new IssueQueueDeqRespBundle))))
53  val wbBusyTableRead = Input(params.genWbFuBusyTableReadBundle())
54  val wbBusyTableWrite = Output(params.genWbFuBusyTableWriteBundle())
55  val wakeupFromWB: MixedVec[ValidIO[IssueQueueWBWakeUpBundle]] = Flipped(params.genWBWakeUpSinkValidBundle)
56  val wakeupFromIQ: MixedVec[ValidIO[IssueQueueIQWakeUpBundle]] = Flipped(params.genIQWakeUpSinkValidBundle)
57  val og0Cancel = Input(ExuOH(backendParams.numExu))
58  val og1Cancel = Input(ExuOH(backendParams.numExu))
59  val ldCancel = Vec(backendParams.LduCnt + backendParams.HyuCnt, Flipped(new LoadCancelIO))
60  val finalBlock = Vec(params.numExu, Input(Bool()))
61
62  // Outputs
63  val wakeupToIQ: MixedVec[ValidIO[IssueQueueIQWakeUpBundle]] = params.genIQWakeUpSourceValidBundle
64  val status = Output(new IssueQueueStatusBundle(params.numEnq, params.numEntries))
65  // val statusNext = Output(new IssueQueueStatusBundle(params.numEnq))
66
67  val deqDelay: MixedVec[DecoupledIO[IssueQueueIssueBundle]] = params.genIssueDecoupledBundle // = deq.cloneType
68  def allWakeUp = wakeupFromWB ++ wakeupFromIQ
69}
70
71class IssueQueueImp(override val wrapper: IssueQueue)(implicit p: Parameters, val params: IssueBlockParams)
72  extends LazyModuleImp(wrapper)
73  with HasXSParameter {
74
75  override def desiredName: String = s"${params.getIQName}"
76
77  println(s"[IssueQueueImp] ${params.getIQName} wakeupFromWB(${io.wakeupFromWB.size}), " +
78    s"wakeup exu in(${params.wakeUpInExuSources.size}): ${params.wakeUpInExuSources.map(_.name).mkString("{",",","}")}, " +
79    s"wakeup exu out(${params.wakeUpOutExuSources.size}): ${params.wakeUpOutExuSources.map(_.name).mkString("{",",","}")}, " +
80    s"numEntries: ${params.numEntries}, numRegSrc: ${params.numRegSrc}")
81
82  require(params.numExu <= 2, "IssueQueue does not support more than 2 deq ports")
83  val deqFuCfgs     : Seq[Seq[FuConfig]] = params.exuBlockParams.map(_.fuConfigs)
84  val allDeqFuCfgs  : Seq[FuConfig] = params.exuBlockParams.flatMap(_.fuConfigs)
85  val fuCfgsCnt     : Map[FuConfig, Int] = allDeqFuCfgs.groupBy(x => x).map { case (cfg, cfgSeq) => (cfg, cfgSeq.length) }
86  val commonFuCfgs  : Seq[FuConfig] = fuCfgsCnt.filter(_._2 > 1).keys.toSeq
87  val fuLatencyMaps : Seq[Map[FuType.OHType, Int]] = params.exuBlockParams.map(x => x.fuLatencyMap)
88
89  println(s"[IssueQueueImp] ${params.getIQName} fuLatencyMaps: ${fuLatencyMaps}")
90  println(s"[IssueQueueImp] ${params.getIQName} commonFuCfgs: ${commonFuCfgs.map(_.name)}")
91  lazy val io = IO(new IssueQueueIO())
92  // Modules
93
94  val entries = Module(new Entries)
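  // Optional per-EXU busy tables: each one is instantiated only when it is meaningful for
  // that EXU (a non-zero max FU latency, or a certain int/vf write-back latency), and its
  // mask is used below to keep entries from issuing into an occupied FU or write-back slot.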
95  val fuBusyTableWrite = params.exuBlockParams.map { case x => OptionWrapper(x.latencyValMax > 0, Module(new FuBusyTableWrite(x.fuLatencyMap))) }
96  val fuBusyTableRead = params.exuBlockParams.map { case x => OptionWrapper(x.latencyValMax > 0, Module(new FuBusyTableRead(x.fuLatencyMap))) }
97  val intWbBusyTableWrite = params.exuBlockParams.map { case x => OptionWrapper(x.intLatencyCertain, Module(new FuBusyTableWrite(x.intFuLatencyMap))) }
98  val intWbBusyTableRead = params.exuBlockParams.map { case x => OptionWrapper(x.intLatencyCertain, Module(new FuBusyTableRead(x.intFuLatencyMap))) }
99  val vfWbBusyTableWrite = params.exuBlockParams.map { case x => OptionWrapper(x.vfLatencyCertain, Module(new FuBusyTableWrite(x.vfFuLatencyMap))) }
100  val vfWbBusyTableRead = params.exuBlockParams.map { case x => OptionWrapper(x.vfLatencyCertain, Module(new FuBusyTableRead(x.vfFuLatencyMap))) }
101
102  class WakeupQueueFlush extends Bundle {
103    val redirect = ValidIO(new Redirect)
104    val ldCancel = Vec(backendParams.LduCnt + backendParams.HyuCnt, new LoadCancelIO)
105    val og0Fail = Output(Bool())
106    val og1Fail = Output(Bool())
107    val finalFail = Output(Bool())
108  }
109
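  // flushFunc: a queued wakeup is dropped when its uop is redirected, cancelled by a load
  // (ldCancel), or has failed at the matching pipeline stage (stage 1 -> og0, 2 -> og1,
  // 3 -> final).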
110  private def flushFunc(exuInput: ExuInput, flush: WakeupQueueFlush, stage: Int): Bool = {
111    val redirectFlush = exuInput.robIdx.needFlush(flush.redirect)
112    val loadDependencyFlush = LoadShouldCancel(exuInput.loadDependency, flush.ldCancel)
113    val ogFailFlush = stage match {
114      case 1 => flush.og0Fail
115      case 2 => flush.og1Fail
116      case 3 => flush.finalFail
117      case _ => false.B
118    }
119    redirectFlush || loadDependencyFlush || ogFailFlush
120  }
121
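  // modificationFunc: shifts the loadDependency vectors left by one, presumably applied for
  // each cycle an entry spends in the wakeup queue so the dependency stays aligned with the
  // load pipeline's progress.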
122  private def modificationFunc(exuInput: ExuInput): ExuInput = {
123    val newExuInput = WireDefault(exuInput)
124    newExuInput.loadDependency match {
125      case Some(deps) => deps.zip(exuInput.loadDependency.get).foreach(x => x._1 := x._2 << 1)
126      case None =>
127    }
128    newExuInput
129  }
130
131  private def lastConnectFunc(exuInput: ExuInput, newInput: ExuInput): ExuInput = {
132    val lastExuInput = WireDefault(exuInput)
133    val newExuInput = WireDefault(newInput)
134    newExuInput.elements.foreach { case (name, data) =>
135      if (lastExuInput.elements.contains(name)) {
136        data := lastExuInput.elements(name)
137      }
138    }
139    if (newExuInput.pdestCopy.nonEmpty && !lastExuInput.pdestCopy.nonEmpty) {
140      newExuInput.pdestCopy.get.foreach(_ := lastExuInput.pdest)
141    }
142    if (newExuInput.rfWenCopy.nonEmpty && !lastExuInput.rfWenCopy.nonEmpty) {
143      newExuInput.rfWenCopy.get.foreach(_ := lastExuInput.rfWen.get)
144    }
145    if (newExuInput.fpWenCopy.nonEmpty && !lastExuInput.fpWenCopy.nonEmpty) {
146      newExuInput.fpWenCopy.get.foreach(_ := lastExuInput.fpWen.get)
147    }
148    if (newExuInput.vecWenCopy.nonEmpty && !lastExuInput.vecWenCopy.nonEmpty) {
149      newExuInput.vecWenCopy.get.foreach(_ := lastExuInput.vecWen.get)
150    }
151    if (newExuInput.loadDependencyCopy.nonEmpty && !lastExuInput.loadDependencyCopy.nonEmpty) {
152      newExuInput.loadDependencyCopy.get.foreach(_ := lastExuInput.loadDependency.get)
153    }
154    newExuInput
155  }
156
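  // One MultiWakeupQueue per EXU that acts as an IQ wake-up source: the issued uop's
  // information is held for its FU latency (fuLatancySet) and then broadcast as a wakeup.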
157  val wakeUpQueues: Seq[Option[MultiWakeupQueue[ExuInput, WakeupQueueFlush]]] = params.exuBlockParams.map { x => OptionWrapper(x.isIQWakeUpSource, Module(
158    new MultiWakeupQueue(new ExuInput(x), new ExuInput(x, x.copyWakeupOut, x.copyNum), new WakeupQueueFlush, x.fuLatancySet, flushFunc, modificationFunc, lastConnectFunc)
159  ))}
160  val deqBeforeDly = Wire(params.genIssueDecoupledBundle)
161
162  val intWbBusyTableIn = io.wbBusyTableRead.map(_.intWbBusyTable)
163  val vfWbBusyTableIn = io.wbBusyTableRead.map(_.vfWbBusyTable)
164  val intWbBusyTableOut = io.wbBusyTableWrite.map(_.intWbBusyTable)
165  val vfWbBusyTableOut = io.wbBusyTableWrite.map(_.vfWbBusyTable)
166  val intDeqRespSetOut = io.wbBusyTableWrite.map(_.intDeqRespSet)
167  val vfDeqRespSetOut = io.wbBusyTableWrite.map(_.vfDeqRespSet)
168  val fuBusyTableMask = Wire(Vec(params.numDeq, UInt(params.numEntries.W)))
169  val intWbBusyTableMask = Wire(Vec(params.numDeq, UInt(params.numEntries.W)))
170  val vfWbBusyTableMask = Wire(Vec(params.numDeq, UInt(params.numEntries.W)))
171  val s0_enqValidVec = io.enq.map(_.valid)
172  val s0_enqSelValidVec = Wire(Vec(params.numEnq, Bool()))
173  val s0_enqNotFlush = !io.flush.valid
174  val s0_enqBits = WireInit(VecInit(io.enq.map(_.bits)))
175  val s0_doEnqSelValidVec = s0_enqSelValidVec.map(_ && s0_enqNotFlush) // enqValid && notFlush && enqReady
176
177
178  val finalDeqSelValidVec = Wire(Vec(params.numDeq, Bool()))
179  val finalDeqSelOHVec    = Wire(Vec(params.numDeq, UInt(params.numEntries.W)))
180
181  val validVec = VecInit(entries.io.valid.asBools)
182  val canIssueVec = VecInit(entries.io.canIssue.asBools)
183  dontTouch(canIssueVec)
184  val deqFirstIssueVec = entries.io.isFirstIssue
185
186  val dataSources: Vec[Vec[DataSource]] = entries.io.dataSources
187  val finalDataSources: Vec[Vec[DataSource]] = VecInit(finalDeqSelOHVec.map(oh => Mux1H(oh, dataSources)))
188  // (entryIdx)(srcIdx)(exuIdx)
189  val wakeUpL1ExuOH: Option[Vec[Vec[UInt]]] = entries.io.srcWakeUpL1ExuOH
190  val srcTimer: Option[Vec[Vec[UInt]]] = entries.io.srcTimer
191
192  // (deqIdx)(srcIdx)(exuIdx)
193  val finalWakeUpL1ExuOH: Option[Vec[Vec[UInt]]] = wakeUpL1ExuOH.map(x => VecInit(finalDeqSelOHVec.map(oh => Mux1H(oh, x))))
194  val finalSrcTimer = srcTimer.map(x => VecInit(finalDeqSelOHVec.map(oh => Mux1H(oh, x))))
195
196  val fuTypeVec = Wire(Vec(params.numEntries, FuType()))
197  val transEntryDeqVec = Wire(Vec(params.numEnq, ValidIO(new EntryBundle)))
198  val deqEntryVec = Wire(Vec(params.numDeq, ValidIO(new EntryBundle)))
199  val transSelVec = Wire(Vec(params.numEnq, UInt((params.numEntries-params.numEnq).W)))
200  val canIssueMergeAllBusy = Wire(Vec(params.numDeq, UInt(params.numEntries.W)))
201  val deqCanIssue = Wire(Vec(params.numDeq, UInt(params.numEntries.W)))
202
203  val enqEntryOldestSel = Wire(Vec(params.numDeq, ValidIO(UInt(params.numEnq.W))))
204  val othersEntryOldestSel = Wire(Vec(params.numDeq, ValidIO(UInt((params.numEntries - params.numEnq).W))))
205  val deqSelValidVec = Wire(Vec(params.numDeq, Bool()))
206  val deqSelOHVec    = Wire(Vec(params.numDeq, UInt(params.numEntries.W)))
207  val cancelDeqVec = Wire(Vec(params.numDeq, Bool()))
208
209  val subDeqSelValidVec = OptionWrapper(params.deqFuSame, Wire(Vec(params.numDeq, Bool())))
210  val subDeqSelOHVec = OptionWrapper(params.deqFuSame, Wire(Vec(params.numDeq, UInt(params.numEntries.W))))
211  val subDeqRequest = OptionWrapper(params.deqFuSame, Wire(UInt(params.numEntries.W)))
212
213  /**
214    * Connection of [[entries]]
215    */
216  entries.io match { case entriesIO: EntriesIO =>
217    entriesIO.flush                                             := io.flush
218    entriesIO.enq.zipWithIndex.foreach { case (enq, enqIdx) =>
219      enq.valid                                                 := s0_doEnqSelValidVec(enqIdx)
220      enq.bits.status.robIdx                                    := s0_enqBits(enqIdx).robIdx
221      enq.bits.status.fuType                                    := IQFuType.readFuType(VecInit(s0_enqBits(enqIdx).fuType.asBools), params.getFuCfgs.map(_.fuType))
222      val numLsrc = s0_enqBits(enqIdx).srcType.size.min(enq.bits.status.srcStatus.map(_.srcType).size)
223      for(j <- 0 until numLsrc) {
224        enq.bits.status.srcStatus(j).psrc                       := s0_enqBits(enqIdx).psrc(j)
225        enq.bits.status.srcStatus(j).srcType                    := s0_enqBits(enqIdx).srcType(j)
226        enq.bits.status.srcStatus(j).srcState                   := s0_enqBits(enqIdx).srcState(j) & !LoadShouldCancel(Some(s0_enqBits(enqIdx).srcLoadDependency(j)), io.ldCancel)
227        enq.bits.status.srcStatus(j).dataSources.value          := DataSource.reg
228        if(params.hasIQWakeUp) {
229          enq.bits.status.srcStatus(j).srcTimer.get             := 0.U(3.W)
230          enq.bits.status.srcStatus(j).srcWakeUpL1ExuOH.get     := 0.U.asTypeOf(ExuVec())
231          enq.bits.status.srcStatus(j).srcLoadDependency.get    := VecInit(s0_enqBits(enqIdx).srcLoadDependency(j).map(x => x(x.getWidth - 2, 0) << 1))
232        }
233      }
234      enq.bits.status.blocked                                   := false.B
235      enq.bits.status.issued                                    := false.B
236      enq.bits.status.firstIssue                                := false.B
237      enq.bits.status.issueTimer                                := "b10".U
238      enq.bits.status.deqPortIdx                                := 0.U
239      if (params.isVecMemIQ) {
240        enq.bits.status.vecMem.get.uopIdx := s0_enqBits(enqIdx).uopIdx
241      }
242      if (params.inIntSchd && params.AluCnt > 0) {
243        // dirty code for lui+addi(w) fusion
244        val isLuiAddiFusion = s0_enqBits(enqIdx).isLUI32
245        val luiImm = Cat(s0_enqBits(enqIdx).lsrc(1), s0_enqBits(enqIdx).lsrc(0), s0_enqBits(enqIdx).imm(ImmUnion.maxLen - 1, 0))
246        enq.bits.imm.foreach(_ := Mux(isLuiAddiFusion, ImmUnion.LUI32.toImm32(luiImm), s0_enqBits(enqIdx).imm))
247      }
248      else if (params.inMemSchd && params.LduCnt > 0) {
249        // dirty code for fused_lui_load
250        val isLuiLoadFusion = SrcType.isNotReg(s0_enqBits(enqIdx).srcType(0)) && FuType.isLoad(s0_enqBits(enqIdx).fuType)
251        enq.bits.imm.foreach(_ := Mux(isLuiLoadFusion, Imm_LUI_LOAD().getLuiImm(s0_enqBits(enqIdx)), s0_enqBits(enqIdx).imm))
252      }
253      else {
254        enq.bits.imm.foreach(_ := s0_enqBits(enqIdx).imm)
255      }
256      enq.bits.payload                                          := s0_enqBits(enqIdx)
257    }
258    entriesIO.og0Resp.zipWithIndex.foreach { case (og0Resp, i) =>
259      og0Resp.valid                                             := io.og0Resp(i).valid
260      og0Resp.bits.robIdx                                       := io.og0Resp(i).bits.robIdx
261      og0Resp.bits.uopIdx.foreach(_                             := io.og0Resp(i).bits.uopIdx.get)
262      og0Resp.bits.dataInvalidSqIdx                             := io.og0Resp(i).bits.dataInvalidSqIdx
263      og0Resp.bits.respType                                     := io.og0Resp(i).bits.respType
264      og0Resp.bits.rfWen                                        := io.og0Resp(i).bits.rfWen
265      og0Resp.bits.fuType                                       := io.og0Resp(i).bits.fuType
266    }
267    entriesIO.og1Resp.zipWithIndex.foreach { case (og1Resp, i) =>
268      og1Resp.valid                                             := io.og1Resp(i).valid
269      og1Resp.bits.robIdx                                       := io.og1Resp(i).bits.robIdx
270      og1Resp.bits.uopIdx.foreach(_                             := io.og1Resp(i).bits.uopIdx.get)
271      og1Resp.bits.dataInvalidSqIdx                             := io.og1Resp(i).bits.dataInvalidSqIdx
272      og1Resp.bits.respType                                     := io.og1Resp(i).bits.respType
273      og1Resp.bits.rfWen                                        := io.og1Resp(i).bits.rfWen
274      og1Resp.bits.fuType                                       := io.og1Resp(i).bits.fuType
275    }
276    entriesIO.finalIssueResp.foreach(_.zipWithIndex.foreach { case (finalIssueResp, i) =>
277      finalIssueResp                                            := io.finalIssueResp.get(i)
278    })
279    for(deqIdx <- 0 until params.numDeq) {
280      entriesIO.deqReady(deqIdx)                                := deqBeforeDly(deqIdx).ready
281      entriesIO.deqSelOH(deqIdx).valid                          := deqSelValidVec(deqIdx)
282      entriesIO.deqSelOH(deqIdx).bits                           := deqSelOHVec(deqIdx)
283      entriesIO.enqEntryOldestSel(deqIdx)                       := enqEntryOldestSel(deqIdx)
284      entriesIO.othersEntryOldestSel(deqIdx)                    := othersEntryOldestSel(deqIdx)
285      entriesIO.subDeqRequest.foreach(_(deqIdx)                 := subDeqRequest.get)
286      entriesIO.subDeqSelOH.foreach(_(deqIdx)                   := subDeqSelOHVec.get(deqIdx))
287    }
288    entriesIO.wakeUpFromWB                                      := io.wakeupFromWB
289    entriesIO.wakeUpFromIQ                                      := io.wakeupFromIQ
290    entriesIO.og0Cancel                                         := io.og0Cancel
291    entriesIO.og1Cancel                                         := io.og1Cancel
292    entriesIO.ldCancel                                          := io.ldCancel
293    // outputs
294    transEntryDeqVec                                            := entriesIO.transEntryDeqVec
295    transSelVec                                                 := entriesIO.transSelVec
296    fuTypeVec                                                   := entriesIO.fuType
297    deqEntryVec                                                 := entriesIO.deqEntry
298    cancelDeqVec                                                := entriesIO.cancelDeqVec
299  }
300
301
302  s0_enqSelValidVec := s0_enqValidVec.zip(io.enq).map{ case (enqValid, enq) => enqValid && enq.ready}
303
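  // commonAccept: per-entry bit, set when the entry's fuType belongs to the FuConfigs shared
  // by more than one deq port.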
304  protected val commonAccept: UInt = Cat(fuTypeVec.map(fuType =>
305    FuType.FuTypeOrR(fuType, commonFuCfgs.map(_.fuType))
306  ).reverse)
307
308  // whether each deq port can accept the uop: one bit per entry, set when the port's FUs can execute the entry's fuType
309  protected val canAcceptVec: Seq[UInt] = deqFuCfgs.map { fuCfgs: Seq[FuConfig] =>
310    Cat(fuTypeVec.map(fuType =>
311      FuType.FuTypeOrR(fuType, fuCfgs.map(_.fuType))
312    ).reverse)
313  }
314
315  protected val deqCanAcceptVec: Seq[IndexedSeq[Bool]] = deqFuCfgs.map { fuCfgs: Seq[FuConfig] =>
316    fuTypeVec.map(fuType =>
317      FuType.FuTypeOrR(fuType, fuCfgs.map(_.fuType)))
318  }
319
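  // Merge readiness with the busy-table masks: an entry may issue on port i only if it is
  // ready and neither the FU busy table nor the int/vf write-back busy tables block it
  // (each mask is applied only when the corresponding table exists for that port).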
320  canIssueMergeAllBusy.zipWithIndex.foreach { case (merge, i) =>
321    val mergeFuBusy = {
322      if (fuBusyTableWrite(i).nonEmpty) canIssueVec.asUInt & (~fuBusyTableMask(i))
323      else canIssueVec.asUInt
324    }
325    val mergeIntWbBusy = {
326      if (intWbBusyTableRead(i).nonEmpty) mergeFuBusy & (~intWbBusyTableMask(i))
327      else mergeFuBusy
328    }
329    val mergeVfWbBusy = {
330      if (vfWbBusyTableRead(i).nonEmpty) mergeIntWbBusy & (~vfWbBusyTableMask(i))
331      else mergeIntWbBusy
332    }
333    merge := mergeVfWbBusy
334  }
335
336  deqCanIssue.zipWithIndex.foreach { case (req, i) =>
337    req := canIssueMergeAllBusy(i) & VecInit(deqCanAcceptVec(i)).asUInt
338  }
339  dontTouch(fuTypeVec)
340  dontTouch(canIssueMergeAllBusy)
341  dontTouch(deqCanIssue)
342
343  if (params.numDeq == 2) {
344    require(params.deqFuSame || params.deqFuDiff, "The 2 deq ports need to be identical or completely different")
345  }
346
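  // Two deq ports with identical FU sets: port 0 prefers the oldest ready entry in the
  // non-enq region (falling back to the DeqPolicy's second pick), while port 1 always takes
  // the DeqPolicy's first pick.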
347  if (params.numDeq == 2 && params.deqFuSame) {
348    enqEntryOldestSel := DontCare
349
350    othersEntryOldestSel(0) := AgeDetector(numEntries = params.numEntries - params.numEnq,
351      enq = VecInit(transEntryDeqVec.zip(transSelVec).map{ case (transEntry, transSel) => Fill(params.numEntries-params.numEnq, transEntry.valid) & transSel }),
352      canIssue = canIssueVec.asUInt(params.numEntries-1, params.numEnq)
353    )
354    othersEntryOldestSel(1) := DontCare
355
356    subDeqRequest.get := canIssueVec.asUInt & ~Cat(othersEntryOldestSel(0).bits, 0.U((params.numEnq).W))
357
358    val subDeqPolicy = Module(new DeqPolicy())
359    subDeqPolicy.io.request := subDeqRequest.get
360    subDeqSelValidVec.get := subDeqPolicy.io.deqSelOHVec.map(oh => oh.valid)
361    subDeqSelOHVec.get := subDeqPolicy.io.deqSelOHVec.map(oh => oh.bits)
362
363    deqSelValidVec(0) := othersEntryOldestSel(0).valid || subDeqSelValidVec.get(1)
364    deqSelValidVec(1) := subDeqSelValidVec.get(0)
365    deqSelOHVec(0) := Mux(othersEntryOldestSel(0).valid,
366                          Cat(othersEntryOldestSel(0).bits, 0.U((params.numEnq).W)),
367                          subDeqSelOHVec.get(1)) & canIssueMergeAllBusy(0)
368    deqSelOHVec(1) := subDeqSelOHVec.get(0) & canIssueMergeAllBusy(1)
369
370    finalDeqSelValidVec.zip(finalDeqSelOHVec).zip(deqSelValidVec).zip(deqSelOHVec).zipWithIndex.foreach { case ((((selValid, selOH), deqValid), deqOH), i) =>
371      selValid := deqValid && deqOH.orR && deqBeforeDly(i).ready
372      selOH := deqOH
373    }
374  }
375  else {
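    // General case: per-port age detectors pick the oldest acceptable entry separately in
    // the enq region and the non-enq region, and the non-enq (older) candidate wins when
    // both are valid.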
376    enqEntryOldestSel := NewAgeDetector(numEntries = params.numEnq,
377      enq = VecInit(s0_doEnqSelValidVec),
378      canIssue = VecInit(deqCanIssue.map(_(params.numEnq-1, 0)))
379    )
380
381    othersEntryOldestSel := AgeDetector(numEntries = params.numEntries - params.numEnq,
382      enq = VecInit(transEntryDeqVec.zip(transSelVec).map{ case (transEntry, transSel) => Fill(params.numEntries-params.numEnq, transEntry.valid) & transSel }),
383      canIssue = VecInit(deqCanIssue.map(_(params.numEntries-1, params.numEnq)))
384    )
385
386    deqSelValidVec.zip(deqSelOHVec).zipWithIndex.foreach { case ((selValid, selOH), i) =>
387      if (params.exuBlockParams(i).fuConfigs.contains(FuConfig.FakeHystaCfg)) {
388        selValid := false.B
389        selOH := 0.U.asTypeOf(selOH)
390      } else {
391        selValid := othersEntryOldestSel(i).valid || enqEntryOldestSel(i).valid
392        selOH := Cat(othersEntryOldestSel(i).bits, Fill(params.numEnq, enqEntryOldestSel(i).valid && !othersEntryOldestSel(i).valid) & enqEntryOldestSel(i).bits)
393      }
394    }
395
396    finalDeqSelValidVec.zip(finalDeqSelOHVec).zip(deqSelValidVec).zip(deqSelOHVec).zipWithIndex.foreach { case ((((selValid, selOH), deqValid), deqOH), i) =>
397      selValid := deqValid && deqBeforeDly(i).ready
398      selOH := deqOH
399    }
400  }
401
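  // Report every finally-selected uop to the busy tables as a successful issue; og0/og1
  // responses are fed into the tables as well so that a failed issue can presumably be undone.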
402  val toBusyTableDeqResp = Wire(Vec(params.numDeq, ValidIO(new IssueQueueDeqRespBundle)))
403
404  toBusyTableDeqResp.zipWithIndex.foreach { case (deqResp, i) =>
405    deqResp.valid := finalDeqSelValidVec(i)
406    deqResp.bits.respType := RSFeedbackType.issueSuccess
407    deqResp.bits.robIdx := DontCare
408    deqResp.bits.dataInvalidSqIdx := DontCare
409    deqResp.bits.rfWen := DontCare
410    deqResp.bits.fuType := deqBeforeDly(i).bits.common.fuType
411    deqResp.bits.uopIdx.foreach(_ := DontCare)
412  }
413
414  // fuBusyTable write and read
415  fuBusyTableWrite.zip(fuBusyTableRead).zipWithIndex.foreach { case ((busyTableWrite: Option[FuBusyTableWrite], busyTableRead: Option[FuBusyTableRead]), i) =>
416    if(busyTableWrite.nonEmpty) {
417      val btwr = busyTableWrite.get
418      val btrd = busyTableRead.get
419      btwr.io.in.deqResp := toBusyTableDeqResp(i)
420      btwr.io.in.og0Resp := io.og0Resp(i)
421      btwr.io.in.og1Resp := io.og1Resp(i)
422      btrd.io.in.fuBusyTable := btwr.io.out.fuBusyTable
423      btrd.io.in.fuTypeRegVec := fuTypeVec
424      fuBusyTableMask(i) := btrd.io.out.fuBusyTableMask
425    }
426    else {
427      fuBusyTableMask(i) := 0.U(params.numEntries.W)
428    }
429  }
430
431  // wb fuBusyTable write
432  intWbBusyTableWrite.zip(intWbBusyTableOut).zip(intDeqRespSetOut).zipWithIndex.foreach { case (((busyTableWrite: Option[FuBusyTableWrite], busyTable: Option[UInt]), deqResp), i) =>
433    if(busyTableWrite.nonEmpty) {
434      val btwr = busyTableWrite.get
435      val bt = busyTable.get
436      val dq = deqResp.get
437      btwr.io.in.deqResp := toBusyTableDeqResp(i)
438      btwr.io.in.og0Resp := io.og0Resp(i)
439      btwr.io.in.og1Resp := io.og1Resp(i)
440      bt := btwr.io.out.fuBusyTable
441      dq := btwr.io.out.deqRespSet
442    }
443  }
444
445  vfWbBusyTableWrite.zip(vfWbBusyTableOut).zip(vfDeqRespSetOut).zipWithIndex.foreach { case (((busyTableWrite: Option[FuBusyTableWrite], busyTable: Option[UInt]), deqResp), i) =>
446    if (busyTableWrite.nonEmpty) {
447      val btwr = busyTableWrite.get
448      val bt = busyTable.get
449      val dq = deqResp.get
450      btwr.io.in.deqResp := toBusyTableDeqResp(i)
451      btwr.io.in.og0Resp := io.og0Resp(i)
452      btwr.io.in.og1Resp := io.og1Resp(i)
453      bt := btwr.io.out.fuBusyTable
454      dq := btwr.io.out.deqRespSet
455    }
456  }
457
458  // wb fuBusyTable read
459  intWbBusyTableRead.zip(intWbBusyTableIn).zipWithIndex.foreach { case ((busyTableRead: Option[FuBusyTableRead], busyTable: Option[UInt]), i) =>
460    if(busyTableRead.nonEmpty) {
461      val btrd = busyTableRead.get
462      val bt = busyTable.get
463      btrd.io.in.fuBusyTable := bt
464      btrd.io.in.fuTypeRegVec := fuTypeVec
465      intWbBusyTableMask(i) := btrd.io.out.fuBusyTableMask
466    }
467    else {
468      intWbBusyTableMask(i) := 0.U(params.numEntries.W)
469    }
470  }
471  vfWbBusyTableRead.zip(vfWbBusyTableIn).zipWithIndex.foreach { case ((busyTableRead: Option[FuBusyTableRead], busyTable: Option[UInt]), i) =>
472    if (busyTableRead.nonEmpty) {
473      val btrd = busyTableRead.get
474      val bt = busyTable.get
475      btrd.io.in.fuBusyTable := bt
476      btrd.io.in.fuTypeRegVec := fuTypeVec
477      vfWbBusyTableMask(i) := btrd.io.out.fuBusyTableMask
478    }
479    else {
480      vfWbBusyTableMask(i) := 0.U(params.numEntries.W)
481    }
482  }
483
484  wakeUpQueues.zipWithIndex.foreach { case (wakeUpQueueOption, i) =>
485    wakeUpQueueOption.foreach {
486      wakeUpQueue =>
487        val flush = Wire(new WakeupQueueFlush)
488        flush.redirect := io.flush
489        flush.ldCancel := io.ldCancel
490        flush.og0Fail := io.og0Resp(i).valid && RSFeedbackType.isBlocked(io.og0Resp(i).bits.respType)
491        flush.og1Fail := io.og1Resp(i).valid && RSFeedbackType.isBlocked(io.og1Resp(i).bits.respType)
492        flush.finalFail := io.finalBlock(i)
493        wakeUpQueue.io.flush := flush
494        wakeUpQueue.io.enq.valid := deqBeforeDly(i).fire
495        wakeUpQueue.io.enq.bits.uop :<= deqBeforeDly(i).bits.common
496        wakeUpQueue.io.enq.bits.uop.pdestCopy.foreach(_ := 0.U)
497        wakeUpQueue.io.enq.bits.lat := getDeqLat(i, deqBeforeDly(i).bits.common.fuType)
498    }
499  }
500
501  deqBeforeDly.zipWithIndex.foreach { case (deq, i) =>
502    deq.valid                := finalDeqSelValidVec(i) && !cancelDeqVec(i)
503    deq.bits.addrOH          := finalDeqSelOHVec(i)
504    deq.bits.common.isFirstIssue := deqFirstIssueVec(i)
505    deq.bits.common.iqIdx    := OHToUInt(finalDeqSelOHVec(i))
506    deq.bits.common.fuType   := IQFuType.readFuType(deqEntryVec(i).bits.status.fuType, params.getFuCfgs.map(_.fuType)).asUInt
507    deq.bits.common.fuOpType := deqEntryVec(i).bits.payload.fuOpType
508    deq.bits.common.rfWen.foreach(_ := deqEntryVec(i).bits.payload.rfWen)
509    deq.bits.common.fpWen.foreach(_ := deqEntryVec(i).bits.payload.fpWen)
510    deq.bits.common.vecWen.foreach(_ := deqEntryVec(i).bits.payload.vecWen)
511    deq.bits.common.flushPipe.foreach(_ := deqEntryVec(i).bits.payload.flushPipe)
512    deq.bits.common.pdest := deqEntryVec(i).bits.payload.pdest
513    deq.bits.common.robIdx := deqEntryVec(i).bits.status.robIdx
514
515    require(deq.bits.common.dataSources.size <= finalDataSources(i).size)
516    deq.bits.common.dataSources.zip(finalDataSources(i)).foreach { case (sink, source) => sink := source}
517    deq.bits.common.l1ExuOH.foreach(_ := finalWakeUpL1ExuOH.get(i))
518    deq.bits.common.srcTimer.foreach(_ := finalSrcTimer.get(i))
519    deq.bits.common.loadDependency.foreach(_ := deqEntryVec(i).bits.status.mergedLoadDependency.get)
520    deq.bits.common.deqLdExuIdx.foreach(_ := params.backendParam.getLdExuIdx(deq.bits.exuParams).U)
521    deq.bits.common.src := DontCare
522    deq.bits.common.preDecode.foreach(_ := deqEntryVec(i).bits.payload.preDecodeInfo)
523
524    deq.bits.rf.zip(deqEntryVec(i).bits.status.srcStatus.map(_.psrc)).zip(deqEntryVec(i).bits.status.srcStatus.map(_.srcType)).foreach { case ((rf, psrc), srcType) =>
525      // psrc in status array can be pregIdx of IntRegFile or VfRegFile
526      rf.foreach(_.addr := psrc)
527      rf.foreach(_.srcType := srcType)
528    }
529    deq.bits.srcType.zip(deqEntryVec(i).bits.status.srcStatus.map(_.srcType)).foreach { case (sink, source) =>
530      sink := source
531    }
532    deq.bits.immType := deqEntryVec(i).bits.payload.selImm
533    deq.bits.common.imm := deqEntryVec(i).bits.imm.getOrElse(0.U)
534
535    deq.bits.common.perfDebugInfo := deqEntryVec(i).bits.payload.debugInfo
536    deq.bits.common.perfDebugInfo.selectTime := GTimer()
537    deq.bits.common.perfDebugInfo.issueTime := GTimer() + 1.U
538  }
539
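  // deqBeforeDly is registered into io.deqDelay through NewPipelineConnect (one extra cycle
  // towards the datapath); loadDependency is pre-shifted by one so it stays consistent with
  // that added cycle.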
540  private val deqShift = WireDefault(deqBeforeDly)
541  deqShift.zip(deqBeforeDly).foreach {
542    case (shifted, original) =>
543      original.ready := shifted.ready // this will not cause a combinational loop
544      shifted.bits.common.loadDependency.foreach(
545        _ := original.bits.common.loadDependency.get.map(_ << 1)
546      )
547  }
548  io.deqDelay.zip(deqShift).foreach { case (deqDly, deq) =>
549    NewPipelineConnect(
550      deq, deqDly, deqDly.valid,
551      false.B,
552      Option("Scheduler2DataPathPipe")
553    )
554  }
555  if(backendParams.debugEn) {
556    dontTouch(io.deqDelay)
557  }
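  // Drive the IQ-to-IQ wakeup outputs from the per-EXU wakeup queues; ports without a
  // wakeup queue are tied off.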
558  io.wakeupToIQ.zipWithIndex.foreach { case (wakeup, i) =>
559    if (wakeUpQueues(i).nonEmpty && finalWakeUpL1ExuOH.nonEmpty) {
560      wakeup.valid := wakeUpQueues(i).get.io.deq.valid
561      wakeup.bits.fromExuInput(wakeUpQueues(i).get.io.deq.bits, finalWakeUpL1ExuOH.get(i))
562      wakeup.bits.loadDependency := wakeUpQueues(i).get.io.deq.bits.loadDependency.getOrElse(0.U.asTypeOf(wakeup.bits.loadDependency))
563      wakeup.bits.is0Lat := getDeqLat(i, wakeUpQueues(i).get.io.deq.bits.fuType) === 0.U
564    } else if (wakeUpQueues(i).nonEmpty) {
565      wakeup.valid := wakeUpQueues(i).get.io.deq.valid
566      wakeup.bits.fromExuInput(wakeUpQueues(i).get.io.deq.bits)
567      wakeup.bits.loadDependency := wakeUpQueues(i).get.io.deq.bits.loadDependency.getOrElse(0.U.asTypeOf(wakeup.bits.loadDependency))
568      wakeup.bits.is0Lat := getDeqLat(i, wakeUpQueues(i).get.io.deq.bits.fuType) === 0.U
569    } else {
570      wakeup.valid := false.B
571      wakeup.bits := 0.U.asTypeOf(wakeup.bits)
572      wakeup.bits.is0Lat := false.B
573    }
574    if (wakeUpQueues(i).nonEmpty) {
575      wakeup.bits.rfWen  := (if (wakeUpQueues(i).get.io.deq.bits.rfWen .nonEmpty) wakeUpQueues(i).get.io.deq.valid && wakeUpQueues(i).get.io.deq.bits.rfWen .get else false.B)
576      wakeup.bits.fpWen  := (if (wakeUpQueues(i).get.io.deq.bits.fpWen .nonEmpty) wakeUpQueues(i).get.io.deq.valid && wakeUpQueues(i).get.io.deq.bits.fpWen .get else false.B)
577      wakeup.bits.vecWen := (if (wakeUpQueues(i).get.io.deq.bits.vecWen.nonEmpty) wakeUpQueues(i).get.io.deq.valid && wakeUpQueues(i).get.io.deq.bits.vecWen.get else false.B)
578    }
579
580    if(wakeUpQueues(i).nonEmpty && wakeup.bits.pdestCopy.nonEmpty){
581      wakeup.bits.pdestCopy.get := wakeUpQueues(i).get.io.deq.bits.pdestCopy.get
582    }
583    if (wakeUpQueues(i).nonEmpty && wakeup.bits.rfWenCopy.nonEmpty) {
584      wakeup.bits.rfWenCopy.get := wakeUpQueues(i).get.io.deq.bits.rfWenCopy.get
585    }
586    if (wakeUpQueues(i).nonEmpty && wakeup.bits.fpWenCopy.nonEmpty) {
587      wakeup.bits.fpWenCopy.get := wakeUpQueues(i).get.io.deq.bits.fpWenCopy.get
588    }
589    if (wakeUpQueues(i).nonEmpty && wakeup.bits.vecWenCopy.nonEmpty) {
590      wakeup.bits.vecWenCopy.get := wakeUpQueues(i).get.io.deq.bits.vecWenCopy.get
591    }
592    if (wakeUpQueues(i).nonEmpty && wakeup.bits.loadDependencyCopy.nonEmpty) {
593      wakeup.bits.loadDependencyCopy.get := wakeUpQueues(i).get.io.deq.bits.loadDependencyCopy.get
594    }
595  }
596
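  // Occupancy tracking: leftVec(i) means the non-enq region has exactly i free slots
  // (leftVec(0) = none free); enqueue stalls when at most one slot is free there while the
  // enq region still holds a valid entry.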
597  // Todo: better counter implementation
598  private val enqHasValid = validVec.take(params.numEnq).reduce(_ | _)
599  private val enqEntryValidCnt = PopCount(validVec.take(params.numEnq))
600  private val othersValidCnt = PopCount(validVec.drop(params.numEnq))
601  io.status.leftVec(0) := validVec.drop(params.numEnq).reduce(_ & _)
602  for (i <- 0 until params.numEnq) {
603    io.status.leftVec(i + 1) := othersValidCnt === (params.numEntries - params.numEnq - (i + 1)).U
604  }
605  private val othersLeftOneCaseVec = Wire(Vec(params.numEntries - params.numEnq, UInt((params.numEntries - params.numEnq).W)))
606  othersLeftOneCaseVec.zipWithIndex.foreach { case (leftone, i) =>
607    leftone := ~(1.U((params.numEntries - params.numEnq).W) << i)
608  }
609  private val othersLeftOne = othersLeftOneCaseVec.map(_ === VecInit(validVec.drop(params.numEnq)).asUInt).reduce(_ | _)
610  private val othersCannotIn = othersLeftOne || validVec.drop(params.numEnq).reduce(_ & _)
611
612  io.enq.foreach(_.ready := !othersCannotIn || !enqHasValid)
613  io.status.empty := !Cat(validVec).orR
614  io.status.full := othersCannotIn
615  io.status.validCnt := PopCount(validVec)
616
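  // getDeqLat: one-hot lookup of the issued fuType in this deq port's latency map
  // (fuType is one-hot over FuType ids, so Mux1H picks the matching latency).
  // e.g. with a map {alu -> 0, mul -> 2}, an issued mul would get lat = 2, which is
  // presumably the depth its wakeup travels through the MultiWakeupQueue.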
617  protected def getDeqLat(deqPortIdx: Int, fuType: UInt) : UInt = {
618    Mux1H(fuLatencyMaps(deqPortIdx) map { case (k, v) => (fuType(k.id), v.U) })
619  }
620
621  // issue perf counter
622  // enq count
623  XSPerfAccumulate("enq_valid_cnt", PopCount(io.enq.map(_.valid)))
624  XSPerfAccumulate("enq_fire_cnt", PopCount(io.enq.map(_.fire)))
625  // valid count
626  XSPerfHistogram("enq_entry_valid_cnt", enqEntryValidCnt, true.B, 0, params.numEnq + 1)
627  XSPerfHistogram("other_entry_valid_cnt", othersValidCnt, true.B, 0, params.numEntries - params.numEnq + 1)
628  XSPerfHistogram("valid_cnt", PopCount(validVec), true.B, 0, params.numEntries + 1)
629  // split the histograms by func type (only for FuTypes present in this IQ)
630  if (params.getFuCfgs.size > 0) {
631    for (t <- FuType.functionNameMap.keys) {
632      val fuName = FuType.functionNameMap(t)
633      if (params.getFuCfgs.map(_.fuType == t).reduce(_ | _)) {
634        XSPerfHistogram(s"valid_cnt_hist_futype_${fuName}", PopCount(validVec.zip(fuTypeVec).map { case (v, fu) => v && fu === t.U }), true.B, 0, params.numEntries, 1)
635      }
636    }
637  }
638  // ready instr count
639  private val readyEntriesCnt = PopCount(validVec.zip(canIssueVec).map(x => x._1 && x._2))
640  XSPerfHistogram("ready_cnt", readyEntriesCnt, true.B, 0, params.numEntries + 1)
641  // split the histograms by func type (only for FuTypes present in this IQ)
642  if (params.getFuCfgs.size > 0) {
643    for (t <- FuType.functionNameMap.keys) {
644      val fuName = FuType.functionNameMap(t)
645      if (params.getFuCfgs.map(_.fuType == t).reduce(_ | _)) {
646        XSPerfHistogram(s"ready_cnt_hist_futype_${fuName}", PopCount(validVec.zip(canIssueVec).zip(fuTypeVec).map { case ((v, c), fu) => v && c && fu === t.U }), true.B, 0, params.numEntries, 1)
647      }
648    }
649  }
650
651  // deq instr count
652  XSPerfAccumulate("issue_instr_pre_count", PopCount(deqBeforeDly.map(_.valid)))
653  XSPerfHistogram("issue_instr_pre_count_hist", PopCount(deqBeforeDly.map(_.valid)), true.B, 0, params.numDeq + 1, 1)
654  XSPerfAccumulate("issue_instr_count", PopCount(io.deqDelay.map(_.valid)))
655  XSPerfHistogram("issue_instr_count_hist", PopCount(io.deqDelay.map(_.valid)), true.B, 0, params.numDeq + 1, 1)
656
657  // deq instr data source count
658  XSPerfAccumulate("issue_datasource_reg", deqBeforeDly.map{ deq =>
659    PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && ds.value === DataSource.reg && !SrcType.isNotReg(deq.bits.srcType(j)) })
660  }.reduce(_ +& _))
661  XSPerfAccumulate("issue_datasource_bypass", deqBeforeDly.map{ deq =>
662    PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && ds.value === DataSource.bypass && !SrcType.isNotReg(deq.bits.srcType(j)) })
663  }.reduce(_ +& _))
664  XSPerfAccumulate("issue_datasource_forward", deqBeforeDly.map{ deq =>
665    PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && ds.value === DataSource.forward && !SrcType.isNotReg(deq.bits.srcType(j)) })
666  }.reduce(_ +& _))
667  XSPerfAccumulate("issue_datasource_noreg", deqBeforeDly.map{ deq =>
668    PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && SrcType.isNotReg(deq.bits.srcType(j)) })
669  }.reduce(_ +& _))
670
671  XSPerfHistogram("issue_datasource_reg_hist", deqBeforeDly.map{ deq =>
672    PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && ds.value === DataSource.reg && !SrcType.isNotReg(deq.bits.srcType(j)) })
673  }.reduce(_ +& _), true.B, 0, params.numDeq * params.numRegSrc + 1, 1)
674  XSPerfHistogram("issue_datasource_bypass_hist", deqBeforeDly.map{ deq =>
675    PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && ds.value === DataSource.bypass && !SrcType.isNotReg(deq.bits.srcType(j)) })
676  }.reduce(_ +& _), true.B, 0, params.numDeq * params.numRegSrc + 1, 1)
677  XSPerfHistogram("issue_datasource_forward_hist", deqBeforeDly.map{ deq =>
678    PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && ds.value === DataSource.forward && !SrcType.isNotReg(deq.bits.srcType(j)) })
679  }.reduce(_ +& _), true.B, 0, params.numDeq * params.numRegSrc + 1, 1)
680  XSPerfHistogram("issue_datasource_noreg_hist", deqBeforeDly.map{ deq =>
681    PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && SrcType.isNotReg(deq.bits.srcType(j)) })
682  }.reduce(_ +& _), true.B, 0, params.numDeq * params.numRegSrc + 1, 1)
683
684  // deq instr data source count for each futype
685  for (t <- FuType.functionNameMap.keys) {
686    val fuName = FuType.functionNameMap(t)
687    if (params.getFuCfgs.map(_.fuType == t).reduce(_ | _)) {
688      XSPerfAccumulate(s"issue_datasource_reg_futype_${fuName}", deqBeforeDly.map{ deq =>
689        PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && ds.value === DataSource.reg && !SrcType.isNotReg(deq.bits.srcType(j)) && deq.bits.common.fuType === t.U })
690      }.reduce(_ +& _))
691      XSPerfAccumulate(s"issue_datasource_bypass_futype_${fuName}", deqBeforeDly.map{ deq =>
692        PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && ds.value === DataSource.bypass && !SrcType.isNotReg(deq.bits.srcType(j)) && deq.bits.common.fuType === t.U })
693      }.reduce(_ +& _))
694      XSPerfAccumulate(s"issue_datasource_forward_futype_${fuName}", deqBeforeDly.map{ deq =>
695        PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && ds.value === DataSource.forward && !SrcType.isNotReg(deq.bits.srcType(j)) && deq.bits.common.fuType === t.U })
696      }.reduce(_ +& _))
697      XSPerfAccumulate(s"issue_datasource_noreg_futype_${fuName}", deqBeforeDly.map{ deq =>
698        PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && SrcType.isNotReg(deq.bits.srcType(j)) && deq.bits.common.fuType === t.U })
699      }.reduce(_ +& _))
700
701      XSPerfHistogram(s"issue_datasource_reg_hist_futype_${fuName}", deqBeforeDly.map{ deq =>
702        PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && ds.value === DataSource.reg && !SrcType.isNotReg(deq.bits.srcType(j)) && deq.bits.common.fuType === t.U })
703      }.reduce(_ +& _), true.B, 0, params.numDeq * params.numRegSrc + 1, 1)
704      XSPerfHistogram(s"issue_datasource_bypass_hist_futype_${fuName}", deqBeforeDly.map{ deq =>
705        PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && ds.value === DataSource.bypass && !SrcType.isNotReg(deq.bits.srcType(j)) && deq.bits.common.fuType === t.U })
706      }.reduce(_ +& _), true.B, 0, params.numDeq * params.numRegSrc + 1, 1)
707      XSPerfHistogram(s"issue_datasource_forward_hist_futype_${fuName}", deqBeforeDly.map{ deq =>
708        PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && ds.value === DataSource.forward && !SrcType.isNotReg(deq.bits.srcType(j)) && deq.bits.common.fuType === t.U })
709      }.reduce(_ +& _), true.B, 0, params.numDeq * params.numRegSrc + 1, 1)
710      XSPerfHistogram(s"issue_datasource_noreg_hist_futype_${fuName}", deqBeforeDly.map{ deq =>
711        PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && SrcType.isNotReg(deq.bits.srcType(j)) && deq.bits.common.fuType === t.U })
712      }.reduce(_ +& _), true.B, 0, params.numDeq * params.numRegSrc + 1, 1)
713    }
714  }
715
716  // cancel instr count
717  if (params.hasIQWakeUp) {
718    val cancelVec: Vec[Bool] = entries.io.cancel.get
719    XSPerfAccumulate("cancel_instr_count", PopCount(validVec.zip(cancelVec).map(x => x._1 & x._2)))
720    XSPerfHistogram("cancel_instr_hist", PopCount(validVec.zip(cancelVec).map(x => x._1 & x._2)), true.B, 0, params.numEntries, 1)
721    for (t <- FuType.functionNameMap.keys) {
722      val fuName = FuType.functionNameMap(t)
723      if (params.getFuCfgs.map(_.fuType == t).reduce(_ | _)) {
724        XSPerfAccumulate(s"cancel_instr_count_futype_${fuName}", PopCount(validVec.zip(cancelVec).zip(fuTypeVec).map{ case ((x, y), fu) => x & y & fu === t.U }))
725        XSPerfHistogram(s"cancel_instr_hist_futype_${fuName}", PopCount(validVec.zip(cancelVec).zip(fuTypeVec).map{ case ((x, y), fu) => x & y & fu === t.U }), true.B, 0, params.numEntries, 1)
726      }
727    }
728  }
729}
730
731class IssueQueueJumpBundle extends Bundle {
732  val pc = UInt(VAddrData().dataWidth.W)
733}
734
735class IssueQueueLoadBundle(implicit p: Parameters) extends XSBundle {
736  val fastMatch = UInt(backendParams.LduCnt.W)
737  val fastImm = UInt(12.W)
738}
739
740class IssueQueueIntIO()(implicit p: Parameters, params: IssueBlockParams) extends IssueQueueIO
741
742class IssueQueueIntImp(override val wrapper: IssueQueue)(implicit p: Parameters, iqParams: IssueBlockParams)
743  extends IssueQueueImp(wrapper)
744{
745  io.suggestName("none")
746  override lazy val io = IO(new IssueQueueIntIO).suggestName("io")
747
748  deqBeforeDly.zipWithIndex.foreach{ case (deq, i) => {
749    deq.bits.common.pc.foreach(_ := deqEntryVec(i).bits.payload.pc)
750    deq.bits.common.preDecode.foreach(_ := deqEntryVec(i).bits.payload.preDecodeInfo)
751    deq.bits.common.ftqIdx.foreach(_ := deqEntryVec(i).bits.payload.ftqPtr)
752    deq.bits.common.ftqOffset.foreach(_ := deqEntryVec(i).bits.payload.ftqOffset)
753    deq.bits.common.predictInfo.foreach(x => {
754      x.target := DontCare
755      x.taken := deqEntryVec(i).bits.payload.pred_taken
756    })
757    // for std
758    deq.bits.common.sqIdx.foreach(_ := deqEntryVec(i).bits.payload.sqIdx)
759    // for i2f
760    deq.bits.common.fpu.foreach(_ := deqEntryVec(i).bits.payload.fpu)
761  }}
762}
763
764class IssueQueueVfImp(override val wrapper: IssueQueue)(implicit p: Parameters, iqParams: IssueBlockParams)
765  extends IssueQueueImp(wrapper)
766{
767  s0_enqBits.foreach{ x =>
768    x.srcType(3) := SrcType.vp // v0: mask src
769    x.srcType(4) := SrcType.vp // vl&vtype
770  }
771  deqBeforeDly.zipWithIndex.foreach{ case (deq, i) => {
772    deq.bits.common.fpu.foreach(_ := deqEntryVec(i).bits.payload.fpu)
773    deq.bits.common.vpu.foreach(_ := deqEntryVec(i).bits.payload.vpu)
774    deq.bits.common.vpu.foreach(_.vuopIdx := deqEntryVec(i).bits.payload.uopIdx)
775    deq.bits.common.vpu.foreach(_.lastUop := deqEntryVec(i).bits.payload.lastUop)
776  }}
777}
778
779class IssueQueueMemBundle(implicit p: Parameters, params: IssueBlockParams) extends Bundle {
780  val feedbackIO = Flipped(Vec(params.numDeq, new MemRSFeedbackIO))
781  val checkWait = new Bundle {
782    val stIssuePtr = Input(new SqPtr)
783    val memWaitUpdateReq = Flipped(new MemWaitUpdateReq)
784  }
785  val loadFastMatch = Output(Vec(params.LduCnt, new IssueQueueLoadBundle))
786
787  // vector
788  val sqDeqPtr = OptionWrapper(params.isVecMemIQ, Input(new SqPtr))
789  val lqDeqPtr = OptionWrapper(params.isVecMemIQ, Input(new LqPtr))
790}
791
792class IssueQueueMemIO(implicit p: Parameters, params: IssueBlockParams) extends IssueQueueIO {
793  val memIO = Some(new IssueQueueMemBundle)
794}
795
796class IssueQueueMemAddrImp(override val wrapper: IssueQueue)(implicit p: Parameters, params: IssueBlockParams)
797  extends IssueQueueImp(wrapper) with HasCircularQueuePtrHelper {
798
799  require(params.StdCnt == 0 && (params.LduCnt + params.StaCnt + params.HyuCnt + params.VlduCnt) > 0, "IssueQueueMemAddrImp can only be instantiated as a MemAddr IQ, " +
800    s"StdCnt: ${params.StdCnt}, LduCnt: ${params.LduCnt}, StaCnt: ${params.StaCnt}, HyuCnt: ${params.HyuCnt}")
801  println(s"[IssueQueueMemAddrImp] StdCnt: ${params.StdCnt}, LduCnt: ${params.LduCnt}, StaCnt: ${params.StaCnt}, HyuCnt: ${params.HyuCnt}")
802
803  io.suggestName("none")
804  override lazy val io = IO(new IssueQueueMemIO).suggestName("io")
805  private val memIO = io.memIO.get
806
807  memIO.loadFastMatch := 0.U.asTypeOf(memIO.loadFastMatch) // TODO: is this still needed?
808
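  // Store-set / load-wait handling at enqueue: keep loadWaitBit only while the blocking
  // store has not been released (sqIdx still after stIssuePtr) and its address is not being
  // issued this cycle (the issuing shortcut is skipped for loadWaitStrict uops).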
809  for (i <- io.enq.indices) {
810    val blockNotReleased = isAfter(io.enq(i).bits.sqIdx, memIO.checkWait.stIssuePtr)
811    val storeAddrWaitForIsIssuing = VecInit((0 until StorePipelineWidth).map(i => {
812      memIO.checkWait.memWaitUpdateReq.robIdx(i).valid &&
813        memIO.checkWait.memWaitUpdateReq.robIdx(i).bits.value === io.enq(i).bits.waitForRobIdx.value
814    })).asUInt.orR && !io.enq(i).bits.loadWaitStrict // the store address this load waits for is being issued (not honored for loadWaitStrict)
815    s0_enqBits(i).loadWaitBit := io.enq(i).bits.loadWaitBit && !storeAddrWaitForIsIssuing && blockNotReleased
816    // when the IQ also handles vector uops (vpu present)
817    if (params.VlduCnt > 0 || params.VstuCnt > 0) {
818      s0_enqBits(i).srcType(3) := SrcType.vp // v0: mask src
819      s0_enqBits(i).srcType(4) := SrcType.vp // vl&vtype
820    }
821  }
822
823  for (i <- entries.io.enq.indices) {
824    entries.io.enq(i).bits.status match { case enqData =>
825      enqData.blocked := false.B // s0_enqBits(i).loadWaitBit
826      enqData.mem.get.strictWait := s0_enqBits(i).loadWaitStrict
827      enqData.mem.get.waitForStd := false.B
828      enqData.mem.get.waitForRobIdx := s0_enqBits(i).waitForRobIdx
829      enqData.mem.get.waitForSqIdx := 0.U.asTypeOf(enqData.mem.get.waitForSqIdx) // generated by sq, will be updated later
830      enqData.mem.get.sqIdx := s0_enqBits(i).sqIdx
831    }
832  }
833  entries.io.fromMem.get.slowResp.zipWithIndex.foreach { case (slowResp, i) =>
834    slowResp.valid := memIO.feedbackIO(i).feedbackSlow.valid
835    slowResp.bits.robIdx := memIO.feedbackIO(i).feedbackSlow.bits.robIdx
836    slowResp.bits.respType := Mux(memIO.feedbackIO(i).feedbackSlow.bits.hit, RSFeedbackType.fuIdle, RSFeedbackType.feedbackInvalid)
837    slowResp.bits.dataInvalidSqIdx := memIO.feedbackIO(i).feedbackSlow.bits.dataInvalidSqIdx
838    slowResp.bits.rfWen := DontCare
839    slowResp.bits.fuType := DontCare
840  }
841
842  entries.io.fromMem.get.fastResp.zipWithIndex.foreach { case (fastResp, i) =>
843    fastResp.valid := memIO.feedbackIO(i).feedbackFast.valid
844    fastResp.bits.robIdx := memIO.feedbackIO(i).feedbackFast.bits.robIdx
845    fastResp.bits.respType := Mux(memIO.feedbackIO(i).feedbackFast.bits.hit, RSFeedbackType.fuIdle, memIO.feedbackIO(i).feedbackFast.bits.sourceType)
846    fastResp.bits.dataInvalidSqIdx := 0.U.asTypeOf(fastResp.bits.dataInvalidSqIdx)
847    fastResp.bits.rfWen := DontCare
848    fastResp.bits.fuType := DontCare
849  }
850
851  entries.io.fromMem.get.memWaitUpdateReq := memIO.checkWait.memWaitUpdateReq
852  entries.io.fromMem.get.stIssuePtr := memIO.checkWait.stIssuePtr
853
854  deqBeforeDly.zipWithIndex.foreach { case (deq, i) =>
855    deq.bits.common.loadWaitBit.foreach(_ := deqEntryVec(i).bits.payload.loadWaitBit)
856    deq.bits.common.waitForRobIdx.foreach(_ := deqEntryVec(i).bits.payload.waitForRobIdx)
857    deq.bits.common.storeSetHit.foreach(_ := deqEntryVec(i).bits.payload.storeSetHit)
858    deq.bits.common.loadWaitStrict.foreach(_ := deqEntryVec(i).bits.payload.loadWaitStrict)
859    deq.bits.common.ssid.foreach(_ := deqEntryVec(i).bits.payload.ssid)
860    deq.bits.common.sqIdx.get := deqEntryVec(i).bits.payload.sqIdx
861    deq.bits.common.lqIdx.get := deqEntryVec(i).bits.payload.lqIdx
862    deq.bits.common.ftqIdx.foreach(_ := deqEntryVec(i).bits.payload.ftqPtr)
863    deq.bits.common.ftqOffset.foreach(_ := deqEntryVec(i).bits.payload.ftqOffset)
864    // when the IQ also handles vector uops (vpu present)
865    if (params.VlduCnt > 0 || params.VstuCnt > 0) {
866      deq.bits.common.vpu.foreach(_ := deqEntryVec(i).bits.payload.vpu)
867      deq.bits.common.vpu.foreach(_.vuopIdx := deqEntryVec(i).bits.payload.uopIdx)
868    }
869  }
870}
871
872class IssueQueueVecMemImp(override val wrapper: IssueQueue)(implicit p: Parameters, params: IssueBlockParams)
873  extends IssueQueueImp(wrapper) with HasCircularQueuePtrHelper {
874
875  require((params.VstdCnt + params.VlduCnt + params.VstaCnt) > 0, "IssueQueueVecMemImp can only be instantiated as a VecMem IQ")
876
877  io.suggestName("none")
878  override lazy val io = IO(new IssueQueueMemIO).suggestName("io")
879  private val memIO = io.memIO.get
880
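  // selectOldUop: one-hot select of the oldest valid uop, ordered by robIdx age and, within
  // the same robIdx, by the smaller uopIdx.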
881  def selectOldUop(robIdx: Seq[RobPtr], uopIdx: Seq[UInt], valid: Seq[Bool]): Vec[Bool] = {
882    val compareVec = (0 until robIdx.length).map(i => (0 until i).map(j => isAfter(robIdx(j), robIdx(i)) || (robIdx(j).value === robIdx(i).value && uopIdx(i) < uopIdx(j))))
883    val resultOnehot = VecInit((0 until robIdx.length).map(i => Cat((0 until robIdx.length).map(j =>
884      (if (j < i) !valid(j) || compareVec(i)(j)
885      else if (j == i) valid(i)
886      else !valid(j) || !compareVec(j)(i))
887    )).andR))
888    resultOnehot
889  }
890
891  val robIdxVec = entries.io.robIdx.get
892  val uopIdxVec = entries.io.uopIdx.get
893  val allEntryOldestOH = selectOldUop(robIdxVec, uopIdxVec, validVec)
894
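  // The vector-memory IQ issues strictly oldest-first: the base class's per-port selection is
  // overridden with the oldest ready entry.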
895  finalDeqSelValidVec.head := (allEntryOldestOH.asUInt & canIssueVec.asUInt).orR
896  finalDeqSelOHVec.head := allEntryOldestOH.asUInt & canIssueVec.asUInt
897
898  if (params.isVecMemAddrIQ) {
899    s0_enqBits.foreach{ x =>
900      x.srcType(3) := SrcType.vp // v0: mask src
901      x.srcType(4) := SrcType.vp // vl&vtype
902    }
903
904    for (i <- io.enq.indices) {
905      s0_enqBits(i).loadWaitBit := false.B
906    }
907
908    for (i <- entries.io.enq.indices) {
909      entries.io.enq(i).bits.status match { case enqData =>
910        enqData.blocked := false.B // s0_enqBits(i).loadWaitBit
911        enqData.mem.get.strictWait := s0_enqBits(i).loadWaitStrict
912        enqData.mem.get.waitForStd := false.B
913        enqData.mem.get.waitForRobIdx := s0_enqBits(i).waitForRobIdx
914        enqData.mem.get.waitForSqIdx := 0.U.asTypeOf(enqData.mem.get.waitForSqIdx) // generated by sq, will be updated later
915        enqData.mem.get.sqIdx := s0_enqBits(i).sqIdx
916      }
917
918      entries.io.fromMem.get.slowResp.zipWithIndex.foreach { case (slowResp, i) =>
919        slowResp.valid                 := memIO.feedbackIO(i).feedbackSlow.valid
920        slowResp.bits.robIdx           := memIO.feedbackIO(i).feedbackSlow.bits.robIdx
921        slowResp.bits.respType         := Mux(memIO.feedbackIO(i).feedbackSlow.bits.hit, RSFeedbackType.fuIdle, RSFeedbackType.feedbackInvalid)
922        slowResp.bits.dataInvalidSqIdx := memIO.feedbackIO(i).feedbackSlow.bits.dataInvalidSqIdx
923        slowResp.bits.rfWen := DontCare
924        slowResp.bits.fuType := DontCare
925      }
926
927      entries.io.fromMem.get.fastResp.zipWithIndex.foreach { case (fastResp, i) =>
928        fastResp.valid                 := memIO.feedbackIO(i).feedbackFast.valid
929        fastResp.bits.robIdx           := memIO.feedbackIO(i).feedbackFast.bits.robIdx
930        fastResp.bits.respType         := memIO.feedbackIO(i).feedbackFast.bits.sourceType
931        fastResp.bits.dataInvalidSqIdx := 0.U.asTypeOf(fastResp.bits.dataInvalidSqIdx)
932        fastResp.bits.rfWen := DontCare
933        fastResp.bits.fuType := DontCare
934      }
935
936      entries.io.fromMem.get.memWaitUpdateReq := memIO.checkWait.memWaitUpdateReq
937      entries.io.fromMem.get.stIssuePtr := memIO.checkWait.stIssuePtr
938    }
939  }
940
941  for (i <- entries.io.enq.indices) {
942    entries.io.enq(i).bits.status.vecMem.get match {
943      case enqData =>
944        enqData.sqIdx := s0_enqBits(i).sqIdx
945        enqData.lqIdx := s0_enqBits(i).lqIdx
946        enqData.uopIdx := s0_enqBits(i).uopIdx
947    }
948  }
949
950  entries.io.vecMemIn.get.sqDeqPtr := memIO.sqDeqPtr.get
951  entries.io.vecMemIn.get.lqDeqPtr := memIO.lqDeqPtr.get
952
953  entries.io.fromMem.get.fastResp.zipWithIndex.foreach { case (resp, i) =>
954    resp.bits.uopIdx.get := 0.U // Todo
955  }
956
957  entries.io.fromMem.get.slowResp.zipWithIndex.foreach { case (resp, i) =>
958    resp.bits.uopIdx.get := 0.U // Todo
959  }
960
961  deqBeforeDly.zipWithIndex.foreach { case (deq, i) =>
962    deq.bits.common.sqIdx.foreach(_ := deqEntryVec(i).bits.payload.sqIdx)
963    deq.bits.common.lqIdx.foreach(_ := deqEntryVec(i).bits.payload.lqIdx)
964    if (params.isVecLdAddrIQ) {
965      deq.bits.common.ftqIdx.get := deqEntryVec(i).bits.payload.ftqPtr
966      deq.bits.common.ftqOffset.get := deqEntryVec(i).bits.payload.ftqOffset
967    }
968    deq.bits.common.fpu.foreach(_ := deqEntryVec(i).bits.payload.fpu)
969    deq.bits.common.vpu.foreach(_ := deqEntryVec(i).bits.payload.vpu)
970    deq.bits.common.vpu.foreach(_.vuopIdx := deqEntryVec(i).bits.payload.uopIdx)
971    deq.bits.common.vpu.foreach(_.lastUop := deqEntryVec(i).bits.payload.lastUop)
972  }
973}
974