// /XiangShan/src/main/scala/xiangshan/frontend/IBuffer.scala (revision 05cc2a4e2621e9ab232a8bb4b8766c2a436a8612)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.frontend

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import utils._
import utility._
import xiangshan.ExceptionNO._

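// Circular-queue pointer types used by the IBuffer:
//   IBufPtr       - indexes the whole buffer (IBufSize entries)
//   IBufInBankPtr - indexes entries within one bank (IBufSize / IBufNBank entries)
//   IBufBankPtr   - selects one of the IBufNBank banks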
class IBufPtr(implicit p: Parameters) extends CircularQueuePtr[IBufPtr](
  p => p(XSCoreParamsKey).IBufSize
) {
}

class IBufInBankPtr(implicit p: Parameters) extends CircularQueuePtr[IBufInBankPtr](
  p => p(XSCoreParamsKey).IBufSize / p(XSCoreParamsKey).IBufNBank
) {
}

class IBufBankPtr(implicit p: Parameters) extends CircularQueuePtr[IBufBankPtr](
  p => p(XSCoreParamsKey).IBufNBank
) {
}

class IBufferIO(implicit p: Parameters) extends XSBundle {
  val flush = Input(Bool())
  val ControlRedirect = Input(Bool())
  val ControlBTBMissBubble = Input(Bool())
  val TAGEMissBubble = Input(Bool())
  val SCMissBubble = Input(Bool())
  val ITTAGEMissBubble = Input(Bool())
  val RASMissBubble = Input(Bool())
  val MemVioRedirect = Input(Bool())
  val in = Flipped(DecoupledIO(new FetchToIBuffer))
  val out = Vec(DecodeWidth, DecoupledIO(new CtrlFlow))
  val full = Output(Bool())
  val decodeCanAccept = Input(Bool())
  val stallReason = new StallReasonIO(DecodeWidth)
}

class IBufEntry(implicit p: Parameters) extends XSBundle {
  val inst = UInt(32.W)
  val pc = UInt(VAddrBits.W)
  val foldpc = UInt(MemPredPCWidth.W)
  val pd = new PreDecodeInfo
  val pred_taken = Bool()
  val ftqPtr = new FtqPtr
  val ftqOffset = UInt(log2Ceil(PredictWidth).W)
  val ipf = Bool()
  val acf = Bool()
  val crossPageIPFFix = Bool()
  val triggered = new TriggerCf

  def fromFetch(fetch: FetchToIBuffer, i: Int): IBufEntry = {
    inst   := fetch.instrs(i)
    pc     := fetch.pc(i)
    foldpc := fetch.foldpc(i)
    pd     := fetch.pd(i)
    pred_taken := fetch.ftqOffset(i).valid
    ftqPtr := fetch.ftqPtr
    ftqOffset := fetch.ftqOffset(i).bits
    ipf := fetch.ipf(i)
    acf := fetch.acf(i)
    crossPageIPFFix := fetch.crossPageIPFFix(i)
    triggered := fetch.triggered(i)
    this
  }

  def toCtrlFlow: CtrlFlow = {
    val cf = Wire(new CtrlFlow)
    cf.instr := inst
    cf.pc := pc
    cf.foldpc := foldpc
    cf.exceptionVec := 0.U.asTypeOf(ExceptionVec())
    cf.exceptionVec(instrPageFault) := ipf
    cf.exceptionVec(instrAccessFault) := acf
    cf.trigger := triggered
    cf.pd := pd
    cf.pred_taken := pred_taken
    cf.crossPageIPFFix := crossPageIPFFix
    cf.storeSetHit := DontCare
    cf.waitForRobIdx := DontCare
    cf.loadWaitBit := DontCare
    cf.loadWaitStrict := DontCare
    cf.ssid := DontCare
    cf.ftqPtr := ftqPtr
    cf.ftqOffset := ftqOffset
    cf
  }
}

class IBuffer(implicit p: Parameters) extends XSModule with HasCircularQueuePtrHelper with HasPerfEvents {
  val io = IO(new IBufferIO)

  // io alias
  private val decodeCanAccept = io.decodeCanAccept

  // Parameter Check
  private val bankSize = IBufSize / IBufNBank
  require(IBufSize % IBufNBank == 0, s"IBufNBank should divide IBufSize, IBufNBank: $IBufNBank, IBufSize: $IBufSize")
  require(IBufNBank >= DecodeWidth,
    s"IBufNBank should be equal to or larger than DecodeWidth, IBufNBank: $IBufNBank, DecodeWidth: $DecodeWidth")

  // IBuffer is organized as raw registers
  // This is because IBuffer is a large queue, so read & write port logic must be precisely controlled
  //                             . + + E E E - .
  //                             . + + E E E - .
  //                             . . + E E E - .
  //                             . . + E E E E -
  // As shown above, + means enqueue, - means dequeue, E is current content
  // On dequeue, the read port is organized like a banked FIFO
  // Dequeue reads no more than 1 entry from each bank sequentially; this can be exploited to reduce area
  // Enqueue writes cannot benefit from this characteristic unless an SRAM is used
  // For details see Enqueue and Dequeue below
  private val ibuf: Vec[IBufEntry] = RegInit(VecInit.fill(IBufSize)(0.U.asTypeOf(new IBufEntry)))
  private val bankedIBufView: Vec[Vec[IBufEntry]] = VecInit.tabulate(IBufNBank)(
    bankID => VecInit.tabulate(bankSize)(
      inBankOffset => ibuf(bankID + inBankOffset * IBufNBank)
    )
  )
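  // Entries are interleaved across banks: flat index = bankID + inBankOffset * IBufNBank.
  // E.g. assuming IBufSize = 48 and IBufNBank = 6 (so bankSize = 8), ibuf(13) is viewed as
  // bank 1, in-bank offset 2, i.e. bankedIBufView(1)(2).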


  // Bypass wire
  private val bypassEntries = WireDefault(VecInit.fill(DecodeWidth)(0.U.asTypeOf(Valid(new IBufEntry))))
  // Normal read wire
  private val deqEntries = WireDefault(VecInit.fill(DecodeWidth)(0.U.asTypeOf(Valid(new IBufEntry))))
  // Output register
  private val outputEntries = RegInit(VecInit.fill(DecodeWidth)(0.U.asTypeOf(Valid(new IBufEntry))))
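  // Dataflow: insts either come straight from fetch (bypassEntries, used when the buffer is empty
  // and decode can accept) or are read out of ibuf (deqEntries); both paths are staged in
  // outputEntries before being presented to decode.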

  // Between Bank
  private val deqBankPtrVec: Vec[IBufBankPtr] = RegInit(VecInit.tabulate(DecodeWidth)(_.U.asTypeOf(new IBufBankPtr)))
  private val deqBankPtr: IBufBankPtr = deqBankPtrVec(0)
  private val deqBankPtrVecNext = Wire(deqBankPtrVec.cloneType)
  // Inside Bank
  private val deqInBankPtr: Vec[IBufInBankPtr] = RegInit(VecInit.fill(IBufNBank)(0.U.asTypeOf(new IBufInBankPtr)))
  private val deqInBankPtrNext = Wire(deqInBankPtr.cloneType)

  val deqPtr = RegInit(0.U.asTypeOf(new IBufPtr))
  val deqPtrNext = Wire(deqPtr.cloneType)

  val enqPtrVec = RegInit(VecInit.tabulate(PredictWidth)(_.U.asTypeOf(new IBufPtr)))
  val enqPtr = enqPtrVec(0)

  val numTryEnq = WireDefault(0.U)
  val numEnq = Mux(io.in.fire, numTryEnq, 0.U)

  val useBypass = enqPtr === deqPtr && decodeCanAccept // empty and decode can accept insts
  // Record whether the insts in the output entries came from bypass or from deq.
  // deqPtr is updated only when they came from deq
  val currentOutUseBypass = RegInit(false.B)

  // The number of insts accepted by decode.
  // Since decode promises to accept insts in order, use a priority encoder to simplify the accumulation.
  private val numOut: UInt = PriorityMuxDefault(io.out.map(x => !x.ready) zip (0 until DecodeWidth).map(_.U), DecodeWidth.U)
  private val numDeq = Mux(currentOutUseBypass, 0.U, numOut)
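  // numOut is the index of the first not-ready output port, or DecodeWidth if all are ready.
  // E.g. if out(0)..out(2) are ready but out(3) is not, numOut = 3.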

  // count the current number of valid entries
  val numValid = distanceBetween(enqPtr, deqPtr)
  val numValidAfterDeq = numValid - numDeq
  // count the next number of valid entries
  val numValidNext = numValid + numEnq - numDeq
  val allowEnq = RegInit(true.B)
  val numFromFetch = Mux(io.in.valid, PopCount(io.in.bits.enqEnable), 0.U)
  val numBypass = PopCount(bypassEntries.map(_.valid))

  allowEnq := (IBufSize - PredictWidth).U >= numValidNext // Disable enqueue when almost full
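  // i.e. enqueue is allowed only while at least PredictWidth entries stay free next cycle,
  // so a full fetch packet can always be accepted once io.in.ready is asserted.
  // E.g. assuming IBufSize = 48 and PredictWidth = 16, enqueue stops once numValidNext exceeds 32.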

  val enqOffset = VecInit.tabulate(PredictWidth)(i => PopCount(io.in.bits.valid.asBools.take(i)))
  val enqData = VecInit.tabulate(PredictWidth)(i => Wire(new IBufEntry).fromFetch(io.in.bits, i))
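  // enqOffset(i) is the compacted position of incoming inst i, i.e. the number of valid insts before it.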

  // when using bypass, bypassed entries do not enqueue
  when(useBypass) {
    when(numFromFetch >= DecodeWidth.U) {
      numTryEnq := numFromFetch - DecodeWidth.U
    } .otherwise {
      numTryEnq := 0.U
    }
  } .otherwise {
    numTryEnq := numFromFetch
  }
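  // E.g. assuming DecodeWidth = 6: if a fetch packet enqueues 9 insts while the buffer is empty
  // and decode can accept, the first 6 go straight to the output registers via bypass and only
  // the remaining 3 are actually written into ibuf.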

  /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
  // Bypass
  /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
  bypassEntries.zipWithIndex.foreach {
    case (entry, idx) =>
      // Select
      val validOH = Range(0, PredictWidth).map {
        i =>
          io.in.bits.valid(i) &&
            io.in.bits.enqEnable(i) &&
            enqOffset(i) === idx.asUInt
      } // Should be OneHot
      entry.valid := validOH.reduce(_ || _) && io.in.fire && !io.flush
      entry.bits := Mux1H(validOH, enqData)

      // Debug Assertion
      XSError(PopCount(validOH) > 1.asUInt, "validOH is not OneHot")
  }
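  // bypassEntries(idx) thus receives the idx-th valid incoming inst (by compacted enqOffset),
  // letting up to DecodeWidth insts skip the queue entirely when it is empty.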

  // => Decode Output
  // clean register output
  io.out zip outputEntries foreach {
    case (io, reg) =>
      io.valid := reg.valid
      io.bits := reg.bits.toCtrlFlow
  }
  outputEntries zip bypassEntries zip deqEntries foreach {
    case ((out, bypass), deq) =>
      when(decodeCanAccept) {
        out := deq
        currentOutUseBypass := false.B
        when(useBypass && io.in.valid) {
          out := bypass
          currentOutUseBypass := true.B
        }
      }
  }
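  // When decode cannot accept, outputEntries holds its value, keeping the insts decode has not yet consumed.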

  /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
  // Enqueue
  /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
  io.in.ready := allowEnq
  // Data
  ibuf.zipWithIndex.foreach {
    case (entry, idx) => {
      // Select
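      // When bypassing, the first DecodeWidth incoming insts skip the queue, so an inst with
      // compacted offset k (k >= DecodeWidth) is written at enqPtrVec(k - DecodeWidth);
      // otherwise inst k is written at enqPtrVec(k).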
      val validOH = Range(0, PredictWidth).map {
        i =>
          val useBypassMatch = enqOffset(i) >= DecodeWidth.U &&
            enqPtrVec(enqOffset(i) - DecodeWidth.U).value === idx.asUInt
          val normalMatch = enqPtrVec(enqOffset(i)).value === idx.asUInt
          val m = Mux(useBypass, useBypassMatch, normalMatch) // when using bypass, bypassed entries do not enqueue

          io.in.bits.valid(i) && io.in.bits.enqEnable(i) && m
      } // Should be OneHot
      val wen = validOH.reduce(_ || _) && io.in.fire && !io.flush

      // Write port
      // Each IBuffer entry has a PredictWidth -> 1 Mux
      val writeEntry = Mux1H(validOH, enqData)
      entry := Mux(wen, writeEntry, entry)

      // Debug Assertion
      XSError(PopCount(validOH) > 1.asUInt, "validOH is not OneHot")
    }
  }
  // Pointer maintenance
  when (io.in.fire && !io.flush) {
    enqPtrVec := VecInit(enqPtrVec.map(_ + numTryEnq))
  }

  /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
  // Dequeue
  /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
  val validVec = Mux(numValidAfterDeq >= DecodeWidth.U,
    ((1 << DecodeWidth) - 1).U,
    UIntToMask(numValidAfterDeq(log2Ceil(DecodeWidth) - 1, 0), DecodeWidth)
  )
  // Data
  // Read port
  // 2-stage, IBufNBank * (bankSize -> 1) + IBufNBank -> 1
  // Should be better than IBufSize -> 1 in area, with no significant latency increase
  private val readStage1: Vec[IBufEntry] = VecInit.tabulate(IBufNBank)(
    bankID => Mux1H(UIntToOH(deqInBankPtrNext(bankID).value), bankedIBufView(bankID))
  )
  for (i <- 0 until DecodeWidth) {
    deqEntries(i).valid := validVec(i)
    deqEntries(i).bits := Mux1H(UIntToOH(deqBankPtrVecNext(i).value), readStage1)
  }
  // Pointer maintenance
  deqBankPtrVecNext := Mux(decodeCanAccept, VecInit(deqBankPtrVec.map(_ + numDeq)), deqBankPtrVec)
  deqPtrNext := Mux(decodeCanAccept, deqPtr + numDeq, deqPtr)
  deqInBankPtrNext.zip(deqInBankPtr).zipWithIndex.foreach {
    case ((ptrNext, ptr), idx) => {
      // validVec[k] == bankValid[deqBankPtr + k]
      // So bankValid[n] == validVec[n - deqBankPtr]
      val validIdx = Mux(idx.asUInt >= deqBankPtr.value,
        idx.asUInt - deqBankPtr.value,
        ((idx + IBufNBank).asUInt - deqBankPtr.value)(log2Ceil(IBufNBank) - 1, 0)
      )(log2Ceil(DecodeWidth) - 1, 0)
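      // E.g. assuming IBufNBank = 6 and DecodeWidth = 6: with deqBankPtr = 4, bank 1 maps to
      // validIdx = (1 + 6) - 4 = 3, so its in-bank pointer advances iff io.out(3).ready
      // (and the current output registers were not filled from bypass).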
      val bankAdvance = Mux(validIdx >= DecodeWidth.U,
        false.B,
        io.out(validIdx).ready // `ready` depends on `valid`, so we need only `ready`, not fire
      ) && !currentOutUseBypass
      ptrNext := Mux(bankAdvance, ptr + 1.U, ptr)
    }
  }

  // Flush
  when (io.flush) {
    allowEnq := true.B
    enqPtrVec := enqPtrVec.indices.map(_.U.asTypeOf(new IBufPtr))
    deqBankPtrVec := deqBankPtrVec.indices.map(_.U.asTypeOf(new IBufBankPtr))
    deqInBankPtr := VecInit.fill(IBufNBank)(0.U.asTypeOf(new IBufInBankPtr))
    deqPtr := 0.U.asTypeOf(new IBufPtr())
    outputEntries.foreach(_.valid := false.B)
  }.otherwise {
    deqPtr := deqPtrNext
    deqInBankPtr := deqInBankPtrNext
    deqBankPtrVec := deqBankPtrVecNext
  }
  io.full := !allowEnq

  /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
  // TopDown
  /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
  val topdown_stage = RegInit(0.U.asTypeOf(new FrontendTopDownBundle))
  topdown_stage := io.in.bits.topdown_info
  when(io.flush) {
    when(io.ControlRedirect) {
      when(io.ControlBTBMissBubble) {
        topdown_stage.reasons(TopDownCounters.BTBMissBubble.id) := true.B
      }.elsewhen(io.TAGEMissBubble) {
        topdown_stage.reasons(TopDownCounters.TAGEMissBubble.id) := true.B
      }.elsewhen(io.SCMissBubble) {
        topdown_stage.reasons(TopDownCounters.SCMissBubble.id) := true.B
      }.elsewhen(io.ITTAGEMissBubble) {
        topdown_stage.reasons(TopDownCounters.ITTAGEMissBubble.id) := true.B
      }.elsewhen(io.RASMissBubble) {
        topdown_stage.reasons(TopDownCounters.RASMissBubble.id) := true.B
      }
    }.elsewhen(io.MemVioRedirect) {
      topdown_stage.reasons(TopDownCounters.MemVioRedirectBubble.id) := true.B
    }.otherwise {
      topdown_stage.reasons(TopDownCounters.OtherRedirectBubble.id) := true.B
    }
  }


  val dequeueInsufficient = Wire(Bool())
  val matchBubble = Wire(UInt(log2Up(TopDownCounters.NumStallReasons.id).W))
  val deqValidCount = PopCount(validVec.asBools)
  val deqWasteCount = DecodeWidth.U - deqValidCount
  dequeueInsufficient := deqValidCount < DecodeWidth.U
  matchBubble := (TopDownCounters.NumStallReasons.id - 1).U - PriorityEncoder(topdown_stage.reasons.reverse)
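  // matchBubble is the index of the highest-indexed asserted bit in topdown_stage.reasons
  // (the reasons vector is scanned from the top via the reversed priority encoder).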

  io.stallReason.reason.map(_ := 0.U)
  for (i <- 0 until DecodeWidth) {
    when(i.U < deqWasteCount) {
      io.stallReason.reason(DecodeWidth - i - 1) := matchBubble
    }
  }
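  // Only the deqWasteCount empty slots at the tail of the decode group are tagged with the
  // matched bubble reason; slots that did receive an inst keep the default reason 0.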

  when(!(deqWasteCount === DecodeWidth.U || topdown_stage.reasons.asUInt.orR)) {
    // should set reason for FetchFragmentationStall
    // topdown_stage.reasons(TopDownCounters.FetchFragmentationStall.id) := true.B
    for (i <- 0 until DecodeWidth) {
      when(i.U < deqWasteCount) {
        io.stallReason.reason(DecodeWidth - i - 1) := TopDownCounters.FetchFragBubble.id.U
      }
    }
  }

  when(io.stallReason.backReason.valid) {
    io.stallReason.reason.map(_ := io.stallReason.backReason.bits)
  }

  // Debug info
  XSError(
    deqPtr.value =/= deqBankPtr.value + deqInBankPtr(deqBankPtr.value).value * IBufNBank.asUInt,
    "Dequeue PTR mismatch"
  )
  XSError(isBefore(enqPtr, deqPtr) && !isFull(enqPtr, deqPtr), "\ndeqPtr is older than enqPtr!\n")

  XSDebug(io.flush, "IBuffer Flushed\n")

  when(io.in.fire) {
    XSDebug("Enqueue:\n")
    XSDebug(p"MASK=${Binary(io.in.bits.valid)}\n")
    for(i <- 0 until PredictWidth){
      XSDebug(p"PC=${Hexadecimal(io.in.bits.pc(i))} ${Hexadecimal(io.in.bits.instrs(i))}\n")
    }
  }

  for (i <- 0 until DecodeWidth) {
    XSDebug(io.out(i).fire,
      p"deq: ${Hexadecimal(io.out(i).bits.instr)} PC=${Hexadecimal(io.out(i).bits.pc)} " +
      p"v=${io.out(i).valid} r=${io.out(i).ready} " +
      p"excpVec=${Binary(io.out(i).bits.exceptionVec.asUInt)} crossPageIPF=${io.out(i).bits.crossPageIPFFix}\n")
  }

  XSDebug(p"numValid: ${numValid}\n")
  XSDebug(p"EnqNum: ${numEnq}\n")
  XSDebug(p"DeqNum: ${numDeq}\n")

  val afterInit = RegInit(false.B)
  val headBubble = RegInit(false.B)
  when (io.in.fire) { afterInit := true.B }
  when (io.flush) {
    headBubble := true.B
  } .elsewhen(numValid =/= 0.U) {
    headBubble := false.B
  }
  val instrHungry = afterInit && (numValid === 0.U) && !headBubble

  QueuePerf(IBufSize, numValid, !allowEnq)
  XSPerfAccumulate("flush", io.flush)
  XSPerfAccumulate("hungry", instrHungry)

  val ibuffer_IDWidth_hvButNotFull = afterInit && (numValid =/= 0.U) && (numValid < DecodeWidth.U) && !headBubble
  XSPerfAccumulate("ibuffer_IDWidth_hvButNotFull", ibuffer_IDWidth_hvButNotFull)
  /*
  XSPerfAccumulate("ICacheMissBubble", Mux(matchBubbleVec(TopDownCounters.ICacheMissBubble.id), deqWasteCount, 0.U))
  XSPerfAccumulate("ITLBMissBubble", Mux(matchBubbleVec(TopDownCounters.ITLBMissBubble.id), deqWasteCount, 0.U))
  XSPerfAccumulate("ControlRedirectBubble", Mux(matchBubbleVec(TopDownCounters.ControlRedirectBubble.id), deqWasteCount, 0.U))
  XSPerfAccumulate("MemVioRedirectBubble", Mux(matchBubbleVec(TopDownCounters.MemVioRedirectBubble.id), deqWasteCount, 0.U))
  XSPerfAccumulate("OtherRedirectBubble", Mux(matchBubbleVec(TopDownCounters.OtherRedirectBubble.id), deqWasteCount, 0.U))
  XSPerfAccumulate("BTBMissBubble", Mux(matchBubbleVec(TopDownCounters.BTBMissBubble.id), deqWasteCount, 0.U))
  XSPerfAccumulate("OverrideBubble", Mux(matchBubbleVec(TopDownCounters.OverrideBubble.id), deqWasteCount, 0.U))
  XSPerfAccumulate("FtqUpdateBubble", Mux(matchBubbleVec(TopDownCounters.FtqUpdateBubble.id), deqWasteCount, 0.U))
  XSPerfAccumulate("FtqFullStall", Mux(matchBubbleVec(TopDownCounters.FtqFullStall.id), deqWasteCount, 0.U))
  XSPerfAccumulate("FetchFragmentBubble",
  Mux(deqWasteCount === DecodeWidth.U || topdown_stage.reasons.asUInt.orR, 0.U, deqWasteCount))
  XSPerfAccumulate("TAGEMissBubble", Mux(matchBubbleVec(TopDownCounters.TAGEMissBubble.id), deqWasteCount, 0.U))
  XSPerfAccumulate("SCMissBubble", Mux(matchBubbleVec(TopDownCounters.SCMissBubble.id), deqWasteCount, 0.U))
  XSPerfAccumulate("ITTAGEMissBubble", Mux(matchBubbleVec(TopDownCounters.ITTAGEMissBubble.id), deqWasteCount, 0.U))
  XSPerfAccumulate("RASMissBubble", Mux(matchBubbleVec(TopDownCounters.RASMissBubble.id), deqWasteCount, 0.U))
  */

  val perfEvents = Seq(
    ("IBuffer_Flushed  ", io.flush                                                                     ),
    ("IBuffer_hungry   ", instrHungry                                                                  ),
    ("IBuffer_1_4_valid", (numValid >  (0*(IBufSize/4)).U) & (numValid < (1*(IBufSize/4)).U)   ),
    ("IBuffer_2_4_valid", (numValid >= (1*(IBufSize/4)).U) & (numValid < (2*(IBufSize/4)).U)   ),
    ("IBuffer_3_4_valid", (numValid >= (2*(IBufSize/4)).U) & (numValid < (3*(IBufSize/4)).U)   ),
    ("IBuffer_4_4_valid", (numValid >= (3*(IBufSize/4)).U) & (numValid < (4*(IBufSize/4)).U)   ),
    ("IBuffer_full     ",  numValid.andR                                                           ),
    ("Front_Bubble     ", PopCount((0 until DecodeWidth).map(i => io.out(i).ready && !io.out(i).valid)))
  )
  generatePerfEvent()
}