xref: /XiangShan/src/main/scala/xiangshan/mem/lsqueue/LoadQueueReplay.scala (revision 870f462d572cd0ef6bf86c91dcda5a5fab6e99d3)
1/***************************************************************************************
2* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
3* Copyright (c) 2020-2021 Peng Cheng Laboratory
4*
5* XiangShan is licensed under Mulan PSL v2.
6* You can use this software according to the terms and conditions of the Mulan PSL v2.
7* You may obtain a copy of Mulan PSL v2 at:
8*          http://license.coscl.org.cn/MulanPSL2
9*
10* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
11* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
12* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
13*
14* See the Mulan PSL v2 for more details.
15***************************************************************************************/
16package xiangshan.mem
17
18import chisel3._
19import chisel3.util._
20import chipsalliance.rocketchip.config._
21import xiangshan._
22import xiangshan.backend.rob.{RobLsqIO, RobPtr}
23import xiangshan.cache._
24import xiangshan.backend.fu.fpu.FPU
25import xiangshan.backend.fu.FuConfig._
26import xiangshan.cache._
27import xiangshan.frontend.FtqPtr
28import xiangshan.ExceptionNO._
29import xiangshan.cache.wpu.ReplayCarry
30import xiangshan.mem.mdp._
31import utils._
32import utility._
33import xiangshan.backend.Bundles.{DynInst, MemExuOutput}
34
35object LoadReplayCauses {
36  // These causes are prioritized: a lower encoding means a higher priority.
37  // When a load must be replayed, the load unit selects the highest-priority
38  // cause from the replay cause vector.
39
40  /*
41   * Warning:
42   * ************************************************************
43   * * Don't change the priority. If the priority is changed,   *
44   * * deadlock may occur. If you really need to change or      *
45   * * add priority, please ensure that no deadlock will occur. *
46   * ************************************************************
47   *
48   */
49  // tlb miss check
50  val C_TM  = 0
51  // st-ld violation (nuke)
52  val C_NK  = 1
53  // st-ld violation re-execute check (memory ambiguity)
54  val C_MA  = 2
55  // store-to-load-forwarding check
56  val C_FF  = 3
57  // dcache replay check
58  val C_DR  = 4
59  // dcache miss check
60  val C_DM  = 5
61  // dcache bank conflict check
62  val C_BC  = 6
63  // RAR queue accept check
64  val C_RAR = 7
65  // RAW queue accept check
66  val C_RAW = 8
67  // total causes
68  val allCauses         = 9
69}
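// Illustration (not part of the design): because a lower cause index means a higher priority,
// the effective replay cause can be picked with a simple priority encoder over the cause
// vector. A minimal Chisel sketch, where `causeVec` stands for an `allCauses`-wide cause word:
//
//   val firstCause = PriorityEncoder(causeVec)            // lowest set bit = highest priority
//   val isTlbMiss  = firstCause === LoadReplayCauses.C_TM.U
//
// e.g. causeVec = "b000110000".U (C_DM and C_DR both set) yields firstCause = 4 (C_DR).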
70
71class AgeDetector(numEntries: Int, numEnq: Int, regOut: Boolean = true)(implicit p: Parameters) extends XSModule {
72  val io = IO(new Bundle {
73    // NOTE: deq and enq may come in the same cycle.
74    val enq = Vec(numEnq, Input(UInt(numEntries.W)))
75    val deq = Input(UInt(numEntries.W))
76    val ready = Input(UInt(numEntries.W))
77    val out = Output(UInt(numEntries.W))
78  })
79
80  // age(i)(j): entry i enters queue before entry j
81  val age = Seq.fill(numEntries)(Seq.fill(numEntries)(RegInit(false.B)))
82  val nextAge = Seq.fill(numEntries)(Seq.fill(numEntries)(Wire(Bool())))
83
84  // to reduce reg usage, only use upper matrix
85  def get_age(row: Int, col: Int): Bool = if (row <= col) age(row)(col) else !age(col)(row)
86  def get_next_age(row: Int, col: Int): Bool = if (row <= col) nextAge(row)(col) else !nextAge(col)(row)
87  def isFlushed(i: Int): Bool = io.deq(i)
88  def isEnqueued(i: Int, numPorts: Int = -1): Bool = {
89    val takePorts = if (numPorts == -1) io.enq.length else numPorts
90    takePorts match {
91      case 0 => false.B
92      case 1 => io.enq.head(i) && !isFlushed(i)
93      case n => VecInit(io.enq.take(n).map(_(i))).asUInt.orR && !isFlushed(i)
94    }
95  }
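  // Worked example (illustrative only): with 3 entries, only the upper triangle
  // age(0)(1), age(0)(2), age(1)(2) carries ordering information, and the diagonal
  // get_age(i, i) doubles as the valid bit of entry i. If entry 2 entered the queue
  // before entry 0, age(0)(2) is false and the mirrored view is recovered as
  // get_age(2, 0) = !age(0)(2) = true, i.e. "entry 2 is older than entry 0".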
96
97  for ((row, i) <- nextAge.zipWithIndex) {
98    val thisValid = get_age(i, i) || isEnqueued(i)
99    for ((elem, j) <- row.zipWithIndex) {
100      when (isFlushed(i)) {
101        // (1) when entry i is flushed or dequeues, set row(i) to false.B
102        elem := false.B
103      }.elsewhen (isFlushed(j)) {
104        // (2) when entry j is flushed or dequeues, set column(j) to validVec
105        elem := thisValid
106      }.elsewhen (isEnqueued(i)) {
107        // (3) when entry i enqueues from port k,
108        // (3.1) if entry j enqueues from previous ports, set to false
109      // (3.2) otherwise, set to true if and only if entry j is invalid
110        // overall: !jEnqFromPreviousPorts && !jIsValid
111        val sel = io.enq.map(_(i))
112        val result = (0 until numEnq).map(k => isEnqueued(j, k))
113        // why ParallelMux: sel must be one-hot since enq is one-hot
114        elem := !get_age(j, j) && !ParallelMux(sel, result)
115      }.otherwise {
116        // default: unchanged
117        elem := get_age(i, j)
118      }
119      age(i)(j) := elem
120    }
121  }
122
123  def getOldest(get: (Int, Int) => Bool): UInt = {
124    VecInit((0 until numEntries).map(i => {
125      io.ready(i) & VecInit((0 until numEntries).map(j => if (i != j) !io.ready(j) || get(i, j) else true.B)).asUInt.andR
126    })).asUInt
127  }
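  // getOldest illustration: entry i is marked iff it is ready and, for every other entry j,
  // either j is not ready or i entered the queue before j. For example, with
  // ready = "b0110" and entry 2 older than entry 1, the result is "b0100" (only entry 2).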
128  val best = getOldest(get_age)
129  val nextBest = getOldest(get_next_age)
130
131  io.out := (if (regOut) best else nextBest)
132}
133
134object AgeDetector {
135  def apply(numEntries: Int, enq: Vec[UInt], deq: UInt, ready: UInt)(implicit p: Parameters): Valid[UInt] = {
136    val age = Module(new AgeDetector(numEntries, enq.length, regOut = true))
137    age.io.enq := enq
138    age.io.deq := deq
139    age.io.ready := ready
140    val out = Wire(Valid(UInt(deq.getWidth.W)))
141    out.valid := age.io.out.orR
142    out.bits := age.io.out
143    out
144  }
145}
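// Usage sketch (hypothetical instance, illustrative names): the companion apply wraps the
// module so callers receive a Valid one-hot of the oldest ready entry, e.g.
//
//   val oldest = AgeDetector(numEntries = 16,
//                            enq   = VecInit(enqOH),   // one one-hot enqueue mask per port
//                            deq   = deqOH,            // one-hot dequeue/flush mask
//                            ready = readyMask)        // entries eligible for selection
//   // oldest.valid is true when any ready entry exists; oldest.bits is its one-hot index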
146
147
148class LoadQueueReplay(implicit p: Parameters) extends XSModule
149  with HasDCacheParameters
150  with HasCircularQueuePtrHelper
151  with HasLoadHelper
152  with HasPerfEvents
153{
154  val io = IO(new Bundle() {
155    // control
156    val redirect = Flipped(ValidIO(new Redirect))
157
158    // from load unit s3
159    val enq = Vec(LoadPipelineWidth, Flipped(Decoupled(new LqWriteBundle)))
160
161    // from sta s1
162    val storeAddrIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle)))
163
164    // from std s1
165    val storeDataIn = Vec(StorePipelineWidth, Flipped(Valid(new MemExuOutput)))
166
167    // queue-based replay
168    val replay = Vec(LoadPipelineWidth, Decoupled(new LsPipelineBundle))
169    val refill = Flipped(ValidIO(new Refill))
170    val tl_d_channel = Input(new DcacheToLduForwardIO)
171
172    // from StoreQueue
173    val stAddrReadySqPtr = Input(new SqPtr)
174    val stAddrReadyVec   = Input(Vec(StoreQueueSize, Bool()))
175    val stDataReadySqPtr = Input(new SqPtr)
176    val stDataReadyVec   = Input(Vec(StoreQueueSize, Bool()))
177
178    //
179    val sqEmpty = Input(Bool())
180    val lqFull  = Output(Bool())
181    val ldWbPtr = Input(new LqPtr)
182    val rarFull = Input(Bool())
183    val rawFull = Input(Bool())
184    val l2_hint  = Input(Valid(new L2ToL1Hint()))
185    val tlbReplayDelayCycleCtrl = Vec(4, Input(UInt(ReSelectLen.W)))
186  })
187
188  println("LoadQueueReplay size: " + LoadQueueReplaySize)
189  //  LoadQueueReplay field:
190  //  +-----------+---------+-------+-------------+--------+
191  //  | Allocated | MicroOp | VAddr |    Cause    |  Flags |
192  //  +-----------+---------+-------+-------------+--------+
193  //  Allocated   : entry has been allocated already
194  //  MicroOp     : inst's microOp
195  //  VAddr       : virtual address
196  //  Cause       : replay cause
197  //  Flags       : rar/raw queue allocate flags
198  val allocated = RegInit(VecInit(List.fill(LoadQueueReplaySize)(false.B))) // The control signals need to explicitly indicate the initial value
199  val scheduled = RegInit(VecInit(List.fill(LoadQueueReplaySize)(false.B)))
200  val uop = Reg(Vec(LoadQueueReplaySize, new DynInst))
201  val vaddrModule = Module(new LqVAddrModule(
202    gen = UInt(VAddrBits.W),
203    numEntries = LoadQueueReplaySize,
204    numRead = LoadPipelineWidth,
205    numWrite = LoadPipelineWidth,
206    numWBank = LoadQueueNWriteBanks,
207    numWDelay = 2,
208    numCamPort = 0))
209  vaddrModule.io := DontCare
210  val debug_vaddr = RegInit(VecInit(List.fill(LoadQueueReplaySize)(0.U(VAddrBits.W))))
211  val cause = RegInit(VecInit(List.fill(LoadQueueReplaySize)(0.U(LoadReplayCauses.allCauses.W))))
212
213  // freelist: stores the indices of free entries.
214  // +---+---+--------------+-----+-----+
215  // | 0 | 1 |      ......  | n-2 | n-1 |
216  // +---+---+--------------+-----+-----+
217  val freeList = Module(new FreeList(
218    size = LoadQueueReplaySize,
219    allocWidth = LoadPipelineWidth,
220    freeWidth = 4,
221    moduleName = "LoadQueueReplay freelist"
222  ))
223  freeList.io := DontCare
224  /**
225   * used for re-select control
226   */
227  val credit = RegInit(VecInit(List.fill(LoadQueueReplaySize)(0.U(ReSelectLen.W))))
228  val selBlocked = RegInit(VecInit(List.fill(LoadQueueReplaySize)(false.B)))
229  //  Pointers selecting which entry of the block-cycle tables to use
230  val blockPtrTlb = RegInit(VecInit(List.fill(LoadQueueReplaySize)(0.U(2.W))))
231  val blockPtrCache = RegInit(VecInit(List.fill(LoadQueueReplaySize)(0.U(2.W))))
232  val blockPtrOthers = RegInit(VecInit(List.fill(LoadQueueReplaySize)(0.U(2.W))))
233  //  Specific cycles to block
234  val blockCyclesTlb = Reg(Vec(4, UInt(ReSelectLen.W)))
235  blockCyclesTlb := io.tlbReplayDelayCycleCtrl
236  val blockCyclesCache = RegInit(VecInit(Seq(0.U(ReSelectLen.W), 0.U(ReSelectLen.W), 0.U(ReSelectLen.W), 0.U(ReSelectLen.W))))
237  val blockCyclesOthers = RegInit(VecInit(Seq(0.U(ReSelectLen.W), 0.U(ReSelectLen.W), 0.U(ReSelectLen.W), 0.U(ReSelectLen.W))))
238  val blockSqIdx = Reg(Vec(LoadQueueReplaySize, new SqPtr))
239  // block causes
240  val blockByTlbMiss = RegInit(VecInit(List.fill(LoadQueueReplaySize)(false.B)))
241  val blockByForwardFail = RegInit(VecInit(List.fill(LoadQueueReplaySize)(false.B)))
242  val blockByMemAmb = RegInit(VecInit(List.fill(LoadQueueReplaySize)(false.B)))
243  val blockByCacheMiss = RegInit(VecInit(List.fill(LoadQueueReplaySize)(false.B)))
244  val blockByRARReject = RegInit(VecInit(List.fill(LoadQueueReplaySize)(false.B)))
245  val blockByRAWReject = RegInit(VecInit(List.fill(LoadQueueReplaySize)(false.B)))
246  val blockByOthers = RegInit(VecInit(List.fill(LoadQueueReplaySize)(false.B)))
247  // DCache miss block
248  val missMSHRId = RegInit(VecInit(List.fill(LoadQueueReplaySize)(0.U((log2Up(cfg.nMissEntries).W)))))
249  // Has this load already updated dcache replacement?
250  val replacementUpdated = RegInit(VecInit(List.fill(LoadQueueReplaySize)(false.B)))
251  val trueCacheMissReplay = WireInit(VecInit(cause.map(_(LoadReplayCauses.C_DM))))
252  val creditUpdate = WireInit(VecInit(List.fill(LoadQueueReplaySize)(0.U(ReSelectLen.W))))
253  (0 until LoadQueueReplaySize).map(i => {
254    creditUpdate(i) := Mux(credit(i) > 0.U(ReSelectLen.W), credit(i)-1.U(ReSelectLen.W), credit(i))
255    selBlocked(i) := creditUpdate(i) =/= 0.U(ReSelectLen.W) || credit(i) =/= 0.U(ReSelectLen.W)
256  })
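  // Re-select control, illustrated: on (re-)enqueue, creditUpdate is loaded with a delay taken
  // from blockCyclesTlb/Cache/Others, indexed by the per-entry block pointer. The pointer
  // saturates at 3, so repeated replays of the same load wait progressively longer, up to the
  // last configured value. The credit then counts down by one each cycle and selBlocked keeps
  // the entry out of replay selection until it reaches zero. E.g. with hypothetical settings
  // blockCyclesTlb = (0, 4, 8, 16) and blockPtrTlb(i) = 2, a TLB-miss replay of entry i is
  // held back for 8 cycles before it may be selected again.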
257  val replayCarryReg = RegInit(VecInit(List.fill(LoadQueueReplaySize)(ReplayCarry(nWays, 0.U, false.B))))
258  val dataInLastBeatReg = RegInit(VecInit(List.fill(LoadQueueReplaySize)(false.B)))
259
260  /**
261   * Enqueue
262   */
263  val canEnqueue = io.enq.map(_.valid)
264  val cancelEnq = io.enq.map(enq => enq.bits.uop.robIdx.needFlush(io.redirect))
265  val needReplay = io.enq.map(enq => enq.bits.rep_info.need_rep)
266  val hasExceptions = io.enq.map(enq => ExceptionNO.selectByFu(enq.bits.uop.exceptionVec, LduCfg).asUInt.orR && !enq.bits.tlbMiss)
267  val loadReplay = io.enq.map(enq => enq.bits.isLoadReplay)
268  val needEnqueue = VecInit((0 until LoadPipelineWidth).map(w => {
269    canEnqueue(w) && !cancelEnq(w) && needReplay(w) && !hasExceptions(w)
270  }))
271  val canFreeVec = VecInit((0 until LoadPipelineWidth).map(w => {
272    canEnqueue(w) && loadReplay(w) && (!needReplay(w) || hasExceptions(w))
273  }))
274
275  // select LoadPipelineWidth valid index.
276  val lqFull = freeList.io.empty
277  val lqFreeNums = freeList.io.validCount
278
279  // replay logic
280  // release logic generation
281  val storeAddrInSameCycleVec = Wire(Vec(LoadQueueReplaySize, Bool()))
282  val storeDataInSameCycleVec = Wire(Vec(LoadQueueReplaySize, Bool()))
283  val addrNotBlockVec = Wire(Vec(LoadQueueReplaySize, Bool()))
284  val dataNotBlockVec = Wire(Vec(LoadQueueReplaySize, Bool()))
285  val storeAddrValidVec = addrNotBlockVec.asUInt | storeAddrInSameCycleVec.asUInt
286  val storeDataValidVec = dataNotBlockVec.asUInt | storeDataInSameCycleVec.asUInt
287
288  // store data valid check
289  val stAddrReadyVec = io.stAddrReadyVec
290  val stDataReadyVec = io.stDataReadyVec
291
292  for (i <- 0 until LoadQueueReplaySize) {
293    // dequeue
294    //  FIXME: store*Ptr is not accurate
295    dataNotBlockVec(i) := !isBefore(io.stDataReadySqPtr, blockSqIdx(i)) || stDataReadyVec(blockSqIdx(i).value) || io.sqEmpty // for better timing
296    addrNotBlockVec(i) := !isBefore(io.stAddrReadySqPtr, blockSqIdx(i)) || stAddrReadyVec(blockSqIdx(i).value) || io.sqEmpty // for better timing
297
298    // store address execute
299    storeAddrInSameCycleVec(i) := VecInit((0 until StorePipelineWidth).map(w => {
300      io.storeAddrIn(w).valid &&
301      !io.storeAddrIn(w).bits.miss &&
302      blockSqIdx(i) === io.storeAddrIn(w).bits.uop.sqIdx
303    })).asUInt.orR // for better timing
304
305    // store data execute
306    storeDataInSameCycleVec(i) := VecInit((0 until StorePipelineWidth).map(w => {
307      io.storeDataIn(w).valid &&
308      blockSqIdx(i) === io.storeDataIn(w).bits.uop.sqIdx
309    })).asUInt.orR // for better timing
310
311  }
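  // Unblock condition, illustrated: a load waiting on store sqIdx = blockSqIdx(i) is considered
  // addr/data-unblocked once the store queue's ready pointer has passed that index, or the
  // per-slot ready bit is already set, or the store queue is empty. The *InSameCycleVec terms
  // additionally catch a store address/data issued in this very cycle, so the load does not
  // lose a cycle waiting for the ready vectors to be updated.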
312
313  // store addr issue check
314  val stAddrDeqVec = Wire(Vec(LoadQueueReplaySize, Bool()))
315  (0 until LoadQueueReplaySize).map(i => {
316    stAddrDeqVec(i) := allocated(i) && storeAddrValidVec(i)
317  })
318
319  // store data issue check
320  val stDataDeqVec = Wire(Vec(LoadQueueReplaySize, Bool()))
321  (0 until LoadQueueReplaySize).map(i => {
322    stDataDeqVec(i) := allocated(i) && storeDataValidVec(i)
323  })
324
325  // update block condition
326  (0 until LoadQueueReplaySize).map(i => {
327    blockByForwardFail(i) := Mux(blockByForwardFail(i) && stDataDeqVec(i), false.B, blockByForwardFail(i))
328    blockByMemAmb(i) := Mux(blockByMemAmb(i) && stAddrDeqVec(i), false.B, blockByMemAmb(i))
329    blockByCacheMiss(i) := Mux(blockByCacheMiss(i) && io.tl_d_channel.valid && io.tl_d_channel.mshrid === missMSHRId(i), false.B, blockByCacheMiss(i))
330
331    when (blockByCacheMiss(i) && io.tl_d_channel.valid && io.tl_d_channel.mshrid === missMSHRId(i)) { creditUpdate(i) := 0.U }
332    when (blockByRARReject(i) && (!io.rarFull || !isAfter(uop(i).lqIdx, io.ldWbPtr))) { blockByRARReject(i) := false.B }
333    when (blockByRAWReject(i) && (!io.rawFull || !isAfter(uop(i).sqIdx, io.stAddrReadySqPtr))) { blockByRAWReject(i) := false.B }
334    when (blockByTlbMiss(i) && creditUpdate(i) === 0.U) { blockByTlbMiss(i) := false.B }
335    when (blockByOthers(i) && creditUpdate(i) === 0.U) { blockByOthers(i) := false.B }
336  })
337
338  //  Replay is split into 3 stages
339  require((LoadQueueReplaySize % LoadPipelineWidth) == 0)
340  def getRemBits(input: UInt)(rem: Int): UInt = {
341    VecInit((0 until LoadQueueReplaySize / LoadPipelineWidth).map(i => { input(LoadPipelineWidth * i + rem) })).asUInt
342  }
343
344  def getRemSeq(input: Seq[Seq[Bool]])(rem: Int) = {
345    (0 until LoadQueueReplaySize / LoadPipelineWidth).map(i => { input(LoadPipelineWidth * i + rem) })
346  }
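  // Bank-interleaving helper, illustrated with hypothetical sizes LoadQueueReplaySize = 8 and
  // LoadPipelineWidth = 2: entries are striped across the replay ports by index modulo 2, so
  //
  //   getRemBits("b10110100".U)(0)   // gathers bits 0,2,4,6 -> "b0110" (full entries 2 and 4)
  //   getRemBits("b10110100".U)(1)   // gathers bits 1,3,5,7 -> "b1100" (full entries 5 and 7)
  //
  // and port `rem` only ever competes for entries whose index is congruent to rem.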
347
348  // stage1: select LoadPipelineWidth entries and read their vaddr
349  val s0_oldestSel = Wire(Vec(LoadPipelineWidth, Valid(UInt(log2Up(LoadQueueReplaySize + 1).W))))
350  val s1_can_go = Wire(Vec(LoadPipelineWidth, Bool()))
351  val s1_oldestSel = Wire(Vec(LoadPipelineWidth, Valid(UInt(log2Up(LoadQueueReplaySize + 1).W))))
352  val s2_can_go = Wire(Vec(LoadPipelineWidth, Bool()))
353  val s2_oldestSel = Wire(Vec(LoadPipelineWidth, Valid(UInt(log2Up(LoadQueueReplaySize + 1).W))))
354
355  // generate mask
356  val needCancel = Wire(Vec(LoadQueueReplaySize, Bool()))
357  // generate enq mask
358  val selectIndexOH = Wire(Vec(LoadPipelineWidth, UInt(LoadQueueReplaySize.W)))
359  val s0_loadEnqFireMask = io.enq.map(x => x.fire && !x.bits.isLoadReplay).zip(selectIndexOH).map(x => Mux(x._1, x._2, 0.U))
360  val s0_remLoadEnqFireVec = s0_loadEnqFireMask.map(x => VecInit((0 until LoadPipelineWidth).map(rem => getRemBits(x)(rem))))
361  val s0_remEnqSelVec = Seq.tabulate(LoadPipelineWidth)(w => VecInit(s0_remLoadEnqFireVec.map(x => x(w))))
362
363  // generate free mask
364  val s0_loadFreeSelMask = needCancel.asUInt
365  val s0_remFreeSelVec = VecInit(Seq.tabulate(LoadPipelineWidth)(rem => getRemBits(s0_loadFreeSelMask)(rem)))
366
367  // L2 hint wakes up cache-missed loads
368  // L2 will send GrantData in the next 2/3 cycles, so wake the missed loads up early and send them to the load pipe; they will then hit the data in the D channel or the MSHR at load S1
369  val s0_loadHintWakeMask = VecInit((0 until LoadQueueReplaySize).map(i => {
370    allocated(i) && !scheduled(i) && blockByCacheMiss(i) && missMSHRId(i) === io.l2_hint.bits.sourceId && io.l2_hint.valid
371  })).asUInt()
372  // L2 sends the 2 data beats over 2 cycles, so if the data needed by this load is in the first beat, select it this cycle; otherwise select it next cycle
373  val s0_loadHintSelMask = s0_loadHintWakeMask & VecInit(dataInLastBeatReg.map(!_)).asUInt
374  val s0_remLoadHintSelMask = VecInit((0 until LoadPipelineWidth).map(rem => getRemBits(s0_loadHintSelMask)(rem)))
375  val s0_hintSelValid = s0_loadHintSelMask.orR
376
377  // wake up cache missed load
378  (0 until LoadQueueReplaySize).foreach(i => {
379    when(s0_loadHintWakeMask(i)) {
380      blockByCacheMiss(i) := false.B
381      creditUpdate(i) := 0.U
382    }
383  })
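  // Hint wake-up, illustrated: when the L2 hint arrives for MSHR sourceId, every allocated,
  // not-yet-scheduled entry blocked on that MSHR is woken in the same cycle (blockByCacheMiss
  // cleared, credit zeroed). Entries whose data sits in the first beat (dataInLastBeatReg is
  // false) also become selectable immediately through s0_loadHintSelMask; entries that need the
  // last beat become selectable one cycle later, matching the two-beat GrantData transfer.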
384
385  // generate replay mask
386  // replay select priority is given as follows:
387  // 1. hint-woken loads
388  // 2. higher-priority loads (dcache miss / forward fail)
389  // 3. lower-priority loads (all other causes)
390  val s0_loadHigherPriorityReplaySelMask = VecInit((0 until LoadQueueReplaySize).map(i => {
391    val blocked = selBlocked(i) || blockByMemAmb(i) || blockByRARReject(i) || blockByRAWReject(i) || blockByOthers(i) || blockByForwardFail(i) || blockByCacheMiss(i) || blockByTlbMiss(i)
392    val hasHigherPriority = cause(i)(LoadReplayCauses.C_DM) || cause(i)(LoadReplayCauses.C_FF)
393    allocated(i) && !scheduled(i) && !blocked && hasHigherPriority
394  })).asUInt // use UInt instead of Vec to reduce Verilog lines
395  val s0_loadLowerPriorityReplaySelMask = VecInit((0 until LoadQueueReplaySize).map(i => {
396    val blocked = selBlocked(i) || blockByMemAmb(i) || blockByRARReject(i) || blockByRAWReject(i) || blockByOthers(i) || blockByForwardFail(i) || blockByCacheMiss(i) || blockByTlbMiss(i)
397    val hasLowerPriority = !cause(i)(LoadReplayCauses.C_DM) && !cause(i)(LoadReplayCauses.C_FF)
398    allocated(i) && !scheduled(i) && !blocked && hasLowerPriority
399  })).asUInt // use UInt instead of Vec to reduce Verilog lines
400  val s0_loadNormalReplaySelMask = s0_loadLowerPriorityReplaySelMask | s0_loadHigherPriorityReplaySelMask | s0_loadHintSelMask
401  val s0_remNormalReplaySelVec = VecInit((0 until LoadPipelineWidth).map(rem => getRemBits(s0_loadNormalReplaySelMask)(rem)))
402  val s0_loadPriorityReplaySelMask = Mux(s0_hintSelValid, s0_loadHintSelMask, Mux(s0_loadHigherPriorityReplaySelMask.orR, s0_loadHigherPriorityReplaySelMask, s0_loadLowerPriorityReplaySelMask))
403  val s0_remPriorityReplaySelVec = VecInit((0 until LoadPipelineWidth).map(rem => getRemBits(s0_loadPriorityReplaySelMask)(rem)))
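  // Selection precedence, illustrated: s0_loadPriorityReplaySelMask collapses the three classes
  // with a fixed order. If any hint-woken load is selectable, only those compete this cycle;
  // otherwise, if any unblocked dcache-miss / forward-fail load exists, those compete; only when
  // both groups are empty do the remaining causes get a replay slot.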
404
405  /******************************************************************************************************
406   * WARNING: Make sure that OldestSelectStride is less than or equal to the number of load pipeline stages. *
407   ******************************************************************************************************
408   */
409  val OldestSelectStride = 4
410  val oldestPtrExt = (0 until OldestSelectStride).map(i => io.ldWbPtr + i.U)
411  val s0_oldestMatchMaskVec = (0 until LoadQueueReplaySize).map(i => (0 until OldestSelectStride).map(j => s0_loadNormalReplaySelMask(i) && uop(i).lqIdx === oldestPtrExt(j)))
412  val s0_remOldsetMatchMaskVec = (0 until LoadPipelineWidth).map(rem => getRemSeq(s0_oldestMatchMaskVec.map(_.take(1)))(rem))
413  val s0_remOlderMatchMaskVec = (0 until LoadPipelineWidth).map(rem => getRemSeq(s0_oldestMatchMaskVec.map(_.drop(1)))(rem))
414  val s0_remOldestSelVec = VecInit(Seq.tabulate(LoadPipelineWidth)(rem => {
415    VecInit((0 until LoadQueueReplaySize / LoadPipelineWidth).map(i => {
416      Mux(VecInit(s0_remOldsetMatchMaskVec(rem).map(_(0))).asUInt.orR, s0_remOldsetMatchMaskVec(rem)(i)(0), s0_remOlderMatchMaskVec(rem)(i).reduce(_|_))
417    })).asUInt
418  }))
419  val s0_remOldestHintSelVec = s0_remOldestSelVec.zip(s0_remLoadHintSelMask).map {
420    case(oldestVec, hintVec) => oldestVec & hintVec
421  }
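  // Oldest-match illustration: oldestPtrExt spans OldestSelectStride (= 4) consecutive lqIdx
  // values starting at the writeback pointer ldWbPtr. A selectable entry matching ldWbPtr
  // itself (the first column of s0_oldestMatchMaskVec) takes precedence; otherwise a match on
  // any of the next three positions still counts as "oldest" for that bank. The bound in the
  // WARNING above keeps this look-ahead within the load pipeline depth.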
422
423  // select oldest logic
424  s0_oldestSel := VecInit((0 until LoadPipelineWidth).map(rport => {
425    // select the earliest-enqueued inst
426    val ageOldest = AgeDetector(LoadQueueReplaySize / LoadPipelineWidth, s0_remEnqSelVec(rport), s0_remFreeSelVec(rport), s0_remPriorityReplaySelVec(rport))
427    assert(!(ageOldest.valid && PopCount(ageOldest.bits) > 1.U), "oldest index must be one-hot!")
428    val ageOldestValid = ageOldest.valid
429    val ageOldestIndexOH = ageOldest.bits
430
431    // select program order oldest
432    val l2HintFirst = io.l2_hint.valid && s0_remOldestHintSelVec(rport).orR
433    val issOldestValid = l2HintFirst || s0_remOldestSelVec(rport).orR
434    val issOldestIndexOH = Mux(l2HintFirst, PriorityEncoderOH(s0_remOldestHintSelVec(rport)), PriorityEncoderOH(s0_remOldestSelVec(rport)))
435
436    val oldest = Wire(Valid(UInt()))
437    val oldestSel = Mux(issOldestValid, issOldestIndexOH, ageOldestIndexOH)
438    val oldestBitsVec = Wire(Vec(LoadQueueReplaySize, Bool()))
439
440    require((LoadQueueReplaySize % LoadPipelineWidth) == 0)
441    oldestBitsVec.foreach(e => e := false.B)
442    for (i <- 0 until LoadQueueReplaySize / LoadPipelineWidth) {
443      oldestBitsVec(i * LoadPipelineWidth + rport) := oldestSel(i)
444    }
445
446    oldest.valid := ageOldest.valid || issOldestValid
447    oldest.bits := OHToUInt(oldestBitsVec.asUInt)
448    oldest
449  }))
450
451
452  // Replay port reorder
453  class BalanceEntry extends XSBundle {
454    val balance = Bool()
455    val index   = UInt(log2Up(LoadQueueReplaySize).W)
456    val port    = UInt(log2Up(LoadPipelineWidth).W)
457  }
458
459  def balanceReOrder(sel: Seq[ValidIO[BalanceEntry]]): Seq[ValidIO[BalanceEntry]] = {
460    require(sel.length > 0)
461    val balancePick = ParallelPriorityMux(sel.map(x => (x.valid && x.bits.balance) -> x))
462    val reorderSel = Wire(Vec(sel.length, ValidIO(new BalanceEntry)))
463    (0 until sel.length).map(i =>
464      if (i == 0) {
465        when (balancePick.valid && balancePick.bits.balance) {
466          reorderSel(i) := balancePick
467        } .otherwise {
468          reorderSel(i) := sel(i)
469        }
470      } else {
471        when (balancePick.valid && balancePick.bits.balance && i.U === balancePick.bits.port) {
472          reorderSel(i) := sel(0)
473        } .otherwise {
474          reorderSel(i) := sel(i)
475        }
476      }
477    )
478    reorderSel
479  }
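  // Rebalancing example (hypothetical 2-port case): if port 1 selected an entry whose replay
  // cause is a dcache bank conflict (balance bit set) while port 0 selected an ordinary entry,
  // balanceReOrder swaps them: the bank-conflict replay goes out on port 0 and the entry
  // originally chosen for port 0 is issued from port 1, so the conflicting access is retried
  // from a different load pipe than the one it was picked for.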
480
481  // stage2: send replay request to load unit
482  // replay cold down (throttle back-to-back replays from the same port)
483  val ColdDownCycles = 16
484  val coldCounter = RegInit(VecInit(List.fill(LoadPipelineWidth)(0.U(log2Up(ColdDownCycles).W))))
485  val ColdDownThreshold = Wire(UInt(log2Up(ColdDownCycles).W))
486  ColdDownThreshold := Constantin.createRecord("ColdDownThreshold_"+p(XSCoreParamsKey).HartId.toString(), initValue = 12.U)
487  assert(ColdDownCycles.U > ColdDownThreshold, "ColdDownCycles must be greater than ColdDownThreshold!")
488
489  def replayCanFire(i: Int) = coldCounter(i) >= 0.U && coldCounter(i) < ColdDownThreshold
490  def coldDownNow(i: Int) = coldCounter(i) >= ColdDownThreshold
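  // Cold-down illustration: coldCounter(i) increments while port i fires replays back-to-back.
  // Once it reaches ColdDownThreshold (a Constantin-tunable value, 12 by default) the port stops
  // issuing replays (replayCanFire is false) and the counter keeps incrementing until it wraps
  // the 4-bit range back below the threshold, leaving the load pipe free for roughly
  // ColdDownCycles - ColdDownThreshold cycles.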
491
492  val s1_balanceOldestSelExt = (0 until LoadPipelineWidth).map(i => {
493    val wrapper = Wire(Valid(new BalanceEntry))
494    wrapper.valid        := s1_oldestSel(i).valid
495    wrapper.bits.balance := cause(s1_oldestSel(i).bits)(LoadReplayCauses.C_BC)
496    wrapper.bits.index   := s1_oldestSel(i).bits
497    wrapper.bits.port    := i.U
498    wrapper
499  })
500
501  val s1_balanceOldestSel = VecInit(balanceReOrder(s1_balanceOldestSelExt))
502  for (i <- 0 until LoadPipelineWidth) {
503    val s0_can_go = s1_can_go(s1_balanceOldestSel(i).bits.port) || uop(s1_oldestSel(i).bits).robIdx.needFlush(io.redirect)
504    val s0_cancel = uop(s0_oldestSel(i).bits).robIdx.needFlush(io.redirect)
505    val s0_oldestSelV = s0_oldestSel(i).valid && !s0_cancel
506    s1_oldestSel(i).valid := RegEnable(s0_oldestSelV, s0_can_go)
507    s1_oldestSel(i).bits := RegEnable(s0_oldestSel(i).bits, s0_can_go)
508
509    when (s0_can_go && s0_oldestSelV) {
510      scheduled(s0_oldestSel(i).bits) := true.B
511    }
512  }
513  val s2_cancelReplay = Wire(Vec(LoadPipelineWidth, Bool()))
514  for (i <- 0 until LoadPipelineWidth) {
515    val s1_cancel = uop(s1_balanceOldestSel(i).bits.index).robIdx.needFlush(io.redirect)
516    val s1_oldestSelV = s1_balanceOldestSel(i).valid && !s1_cancel
517    s1_can_go(i)          := Mux(s2_oldestSel(i).valid && !s2_cancelReplay(i), io.replay(i).ready && replayCanFire(i), true.B)
518    s2_oldestSel(i).valid := RegEnable(s1_oldestSelV, s1_can_go(i))
519    s2_oldestSel(i).bits  := RegEnable(s1_balanceOldestSel(i).bits.index, s1_can_go(i))
520
521    vaddrModule.io.ren(i) := s1_balanceOldestSel(i).valid && s1_can_go(i)
522    vaddrModule.io.raddr(i) := s1_balanceOldestSel(i).bits.index
523  }
524
525  for (i <- 0 until LoadPipelineWidth) {
526    val s1_replayIdx = s1_balanceOldestSel(i).bits.index
527    val s2_replayUop = RegEnable(uop(s1_replayIdx), s1_can_go(i))
528    val s2_replayMSHRId = RegEnable(missMSHRId(s1_replayIdx), s1_can_go(i))
529    val s2_replacementUpdated = RegEnable(replacementUpdated(s1_replayIdx), s1_can_go(i))
530    val s2_replayCauses = RegEnable(cause(s1_replayIdx), s1_can_go(i))
531    val s2_replayCarry = RegEnable(replayCarryReg(s1_replayIdx), s1_can_go(i))
532    val s2_replayCacheMissReplay = RegEnable(trueCacheMissReplay(s1_replayIdx), s1_can_go(i))
533    s2_cancelReplay(i) := s2_replayUop.robIdx.needFlush(io.redirect)
534
535    s2_can_go(i) := DontCare
536    io.replay(i).valid             := s2_oldestSel(i).valid && !s2_cancelReplay(i) && replayCanFire(i)
537    io.replay(i).bits              := DontCare
538    io.replay(i).bits.uop          := s2_replayUop
539    io.replay(i).bits.vaddr        := vaddrModule.io.rdata(i)
540    io.replay(i).bits.isFirstIssue := false.B
541    io.replay(i).bits.isLoadReplay := true.B
542    io.replay(i).bits.replayCarry  := s2_replayCarry
543    io.replay(i).bits.mshrid       := s2_replayMSHRId
544    io.replay(i).bits.replacementUpdated := s2_replacementUpdated
545    io.replay(i).bits.forward_tlDchannel := s2_replayCauses(LoadReplayCauses.C_DM)
546    io.replay(i).bits.schedIndex   := s2_oldestSel(i).bits
547
548    when (io.replay(i).fire) {
549      XSError(!allocated(s2_oldestSel(i).bits), p"LoadQueueReplay: why replay an invalid entry ${s2_oldestSel(i).bits} ?")
550    }
551  }
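  // Pipeline recap (descriptive only): s0 picks a one-hot entry per port and the entry is marked
  // scheduled as the selection is captured into s1; s1 applies the balance reorder and starts
  // the vaddrModule read; s2 drives io.replay with the registered uop / cause / MSHR information
  // plus the vaddr returned by the RAM in this cycle. An entry cancelled by a redirect at any
  // stage simply never asserts the downstream valid, and s1_can_go lets s1 advance when s2 holds
  // nothing pending or when the replay port accepts the s2 request (and cold-down allows it).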
552
553  // update cold counter
554  val lastReplay = RegNext(VecInit(io.replay.map(_.fire)))
555  for (i <- 0 until LoadPipelineWidth) {
556    when (lastReplay(i) && io.replay(i).fire) {
557      coldCounter(i) := coldCounter(i) + 1.U
558    } .elsewhen (coldDownNow(i)) {
559      coldCounter(i) := coldCounter(i) + 1.U
560    } .otherwise {
561      coldCounter(i) := 0.U
562    }
563  }
564
565  when(io.refill.valid) {
566    XSDebug("miss resp: paddr:0x%x data %x\n", io.refill.bits.addr, io.refill.bits.data)
567  }
568
569  //  LoadQueueReplay deallocate
570  val freeMaskVec = Wire(Vec(LoadQueueReplaySize, Bool()))
571
572  // init
573  freeMaskVec.map(e => e := false.B)
574
575  // Allocate logic
576  val enqValidVec = Wire(Vec(LoadPipelineWidth, Bool()))
577  val enqIndexVec = Wire(Vec(LoadPipelineWidth, UInt()))
578
579  val newEnqueue = (0 until LoadPipelineWidth).map(i => {
580    needEnqueue(i) && !io.enq(i).bits.isLoadReplay
581  })
582
583  for ((enq, w) <- io.enq.zipWithIndex) {
584    vaddrModule.io.wen(w) := false.B
585    freeList.io.doAllocate(w) := false.B
586
587    freeList.io.allocateReq(w) := newEnqueue(w)
588
589    //  Allocated ready
590    enqValidVec(w) := freeList.io.canAllocate(w)
591    enqIndexVec(w) := Mux(enq.bits.isLoadReplay, enq.bits.schedIndex, freeList.io.allocateSlot(w))
592    selectIndexOH(w) := UIntToOH(enqIndexVec(w))
593    enq.ready := Mux(enq.bits.isLoadReplay, true.B, enqValidVec(w))
594
595    val enqIndex = enqIndexVec(w)
596    when (needEnqueue(w) && enq.ready) {
597
598      val debug_robIdx = enq.bits.uop.robIdx.asUInt
599      XSError(allocated(enqIndex) && !enq.bits.isLoadReplay, p"LoadQueueReplay: cannot accept more loads, check: ldu $w, robIdx $debug_robIdx!")
600      XSError(hasExceptions(w), p"LoadQueueReplay: The instruction has an exception and cannot be replayed, check: ldu $w, robIdx $debug_robIdx!")
601
602      freeList.io.doAllocate(w) := !enq.bits.isLoadReplay
603
604      //  Allocate new entry
605      allocated(enqIndex) := true.B
606      scheduled(enqIndex) := false.B
607      uop(enqIndex)       := enq.bits.uop
608
609      vaddrModule.io.wen(w)   := true.B
610      vaddrModule.io.waddr(w) := enqIndex
611      vaddrModule.io.wdata(w) := enq.bits.vaddr
612      debug_vaddr(enqIndex)   := enq.bits.vaddr
613
614      /**
615       * used for feedback and replay
616       */
617      // set flags
618      val replayInfo = enq.bits.rep_info
619      val dataInLastBeat = replayInfo.last_beat
620      cause(enqIndex) := replayInfo.cause.asUInt
621
622      // update credit
623      val blockCyclesTlbPtr = blockPtrTlb(enqIndex)
624      val blockCyclesCachePtr = blockPtrCache(enqIndex)
625      val blockCyclesOtherPtr = blockPtrOthers(enqIndex)
626      creditUpdate(enqIndex) := Mux(replayInfo.cause(LoadReplayCauses.C_TM), blockCyclesTlb(blockCyclesTlbPtr),
627                                Mux(replayInfo.cause(LoadReplayCauses.C_DM), blockCyclesCache(blockCyclesCachePtr) + dataInLastBeat, blockCyclesOthers(blockCyclesOtherPtr)))
628
629      // init
630      blockByTlbMiss(enqIndex)     := false.B
631      blockByMemAmb(enqIndex)      := false.B
632      blockByForwardFail(enqIndex) := false.B
633      blockByCacheMiss(enqIndex)   := false.B
634      blockByRARReject(enqIndex)   := false.B
635      blockByRAWReject(enqIndex)   := false.B
636      blockByOthers(enqIndex)      := false.B
637
638      // update block pointer
639      when (replayInfo.cause(LoadReplayCauses.C_DR)) {
640        // normal case: dcache replay
641        blockByOthers(enqIndex) := true.B
642        blockPtrOthers(enqIndex) :=  Mux(blockPtrOthers(enqIndex) === 3.U(2.W), blockPtrOthers(enqIndex), blockPtrOthers(enqIndex) + 1.U(2.W))
643      } .elsewhen (replayInfo.cause(LoadReplayCauses.C_BC) || replayInfo.cause(LoadReplayCauses.C_NK)) {
644        // normal case: bank conflict or st-ld violation (nuke)
645        // can replay next cycle
646        creditUpdate(enqIndex) := 0.U
647        blockByOthers(enqIndex) := false.B
648      }
649
650      // special case: tlb miss
651      when (replayInfo.cause(LoadReplayCauses.C_TM)) {
652        blockByTlbMiss(enqIndex) := true.B
653        blockPtrTlb(enqIndex) := Mux(blockPtrTlb(enqIndex) === 3.U(2.W), blockPtrTlb(enqIndex), blockPtrTlb(enqIndex) + 1.U(2.W))
654      }
655
656      // special case: dcache miss
657      when (replayInfo.cause(LoadReplayCauses.C_DM) && enq.bits.handledByMSHR) {
658        blockByCacheMiss(enqIndex) := !replayInfo.full_fwd && //  dcache miss
659                                  !(io.tl_d_channel.valid && io.tl_d_channel.mshrid === replayInfo.mshr_id) // no refill in this cycle
660
661        blockPtrCache(enqIndex) := Mux(blockPtrCache(enqIndex) === 3.U(2.W), blockPtrCache(enqIndex), blockPtrCache(enqIndex) + 1.U(2.W))
662      }
663
664      // special case: st-ld violation
665      when (replayInfo.cause(LoadReplayCauses.C_MA)) {
666        blockByMemAmb(enqIndex) := true.B
667        blockSqIdx(enqIndex) := replayInfo.addr_inv_sq_idx
668        blockPtrOthers(enqIndex) :=  Mux(blockPtrOthers(enqIndex) === 3.U(2.W), blockPtrOthers(enqIndex), blockPtrOthers(enqIndex) + 1.U(2.W))
669      }
670
671      // special case: data forward fail
672      when (replayInfo.cause(LoadReplayCauses.C_FF)) {
673        blockByForwardFail(enqIndex) := true.B
674        blockSqIdx(enqIndex) := replayInfo.data_inv_sq_idx
675        blockPtrOthers(enqIndex) :=  Mux(blockPtrOthers(enqIndex) === 3.U(2.W), blockPtrOthers(enqIndex), blockPtrOthers(enqIndex) + 1.U(2.W))
676      }
677
678      // special case: rar reject
679      when (replayInfo.cause(LoadReplayCauses.C_RAR)) {
680        blockByRARReject(enqIndex) := true.B
681        blockPtrOthers(enqIndex) :=  Mux(blockPtrOthers(enqIndex) === 3.U(2.W), blockPtrOthers(enqIndex), blockPtrOthers(enqIndex) + 1.U(2.W))
682      }
683
684      // special case: raw reject
685      when (replayInfo.cause(LoadReplayCauses.C_RAW)) {
686        blockByRAWReject(enqIndex) := true.B
687        blockPtrOthers(enqIndex) :=  Mux(blockPtrOthers(enqIndex) === 3.U(2.W), blockPtrOthers(enqIndex), blockPtrOthers(enqIndex) + 1.U(2.W))
688      }
689
690      // extra info
691      replayCarryReg(enqIndex) := replayInfo.rep_carry
692      replacementUpdated(enqIndex) := enq.bits.replacementUpdated
693      // update mshr_id only when the load has already been handled by mshr
694      when(enq.bits.handledByMSHR) {
695        missMSHRId(enqIndex) := replayInfo.mshr_id
696      }
697      dataInLastBeatReg(enqIndex) := dataInLastBeat
698    }
699
700    // a replayed load has come back from the load unit: free the entry if it is done, otherwise clear scheduled so it can be re-selected
701    val schedIndex = enq.bits.schedIndex
702    when (enq.valid && enq.bits.isLoadReplay) {
703      when (!needReplay(w) || hasExceptions(w)) {
704        allocated(schedIndex) := false.B
705        freeMaskVec(schedIndex) := true.B
706      } .otherwise {
707        scheduled(schedIndex) := false.B
708      }
709    }
710  }
711
712  // misprediction recovery / exception redirect
713  for (i <- 0 until LoadQueueReplaySize) {
714    needCancel(i) := uop(i).robIdx.needFlush(io.redirect) && allocated(i)
715    when (needCancel(i)) {
716      allocated(i) := false.B
717      freeMaskVec(i) := true.B
718    }
719  }
720
721  freeList.io.free := freeMaskVec.asUInt
722
723  io.lqFull := lqFull
724
725  // Topdown
726  val sourceVaddr = WireInit(0.U.asTypeOf(new Valid(UInt(VAddrBits.W))))
727
728  ExcitingUtils.addSink(sourceVaddr, s"rob_head_vaddr_${coreParams.HartId}", ExcitingUtils.Perf)
729
730  val uop_wrapper = Wire(Vec(LoadQueueReplaySize, new XSBundleWithMicroOp))
731  (uop_wrapper.zipWithIndex).foreach {
732    case (u, i) => {
733      u.uop := uop(i)
734    }
735  }
736  val lq_match_vec = (debug_vaddr.zip(allocated)).map{case(va, alloc) => alloc && (va === sourceVaddr.bits)}
737  val rob_head_lq_match = ParallelOperation(lq_match_vec.zip(uop_wrapper), (a: Tuple2[Bool, XSBundleWithMicroOp], b: Tuple2[Bool, XSBundleWithMicroOp]) => {
738    val (a_v, a_uop) = (a._1, a._2)
739    val (b_v, b_uop) = (b._1, b._2)
740
741    val res = Mux(a_v && b_v, Mux(isAfter(a_uop.uop.robIdx, b_uop.uop.robIdx), b_uop, a_uop),
742                  Mux(a_v, a_uop,
743                      Mux(b_v, b_uop,
744                                a_uop)))
745    (a_v || b_v, res)
746  })
747
748  val lq_match_bits = rob_head_lq_match._2.uop
749  val lq_match      = rob_head_lq_match._1 && sourceVaddr.valid
750  val lq_match_idx  = lq_match_bits.lqIdx.value
751
752  val rob_head_tlb_miss        = lq_match && cause(lq_match_idx)(LoadReplayCauses.C_TM)
753  val rob_head_nuke            = lq_match && cause(lq_match_idx)(LoadReplayCauses.C_NK)
754  val rob_head_mem_amb         = lq_match && cause(lq_match_idx)(LoadReplayCauses.C_MA)
755  val rob_head_confilct_replay = lq_match && cause(lq_match_idx)(LoadReplayCauses.C_BC)
756  val rob_head_forward_fail    = lq_match && cause(lq_match_idx)(LoadReplayCauses.C_FF)
757  val rob_head_mshrfull_replay = lq_match && cause(lq_match_idx)(LoadReplayCauses.C_DR)
758  val rob_head_dcache_miss     = lq_match && cause(lq_match_idx)(LoadReplayCauses.C_DM)
759  val rob_head_rar_nack        = lq_match && cause(lq_match_idx)(LoadReplayCauses.C_RAR)
760  val rob_head_raw_nack        = lq_match && cause(lq_match_idx)(LoadReplayCauses.C_RAW)
761  val rob_head_other_replay    = lq_match && (rob_head_rar_nack || rob_head_raw_nack || rob_head_forward_fail)
762
763  val rob_head_vio_replay = rob_head_nuke || rob_head_mem_amb
764
765  val rob_head_miss_in_dtlb = WireInit(false.B)
766  ExcitingUtils.addSink(rob_head_miss_in_dtlb, s"miss_in_dtlb_${coreParams.HartId}", ExcitingUtils.Perf)
767  ExcitingUtils.addSource(rob_head_tlb_miss && !rob_head_miss_in_dtlb, s"load_tlb_replay_stall_${coreParams.HartId}", ExcitingUtils.Perf, true)
768  ExcitingUtils.addSource(rob_head_tlb_miss &&  rob_head_miss_in_dtlb, s"load_tlb_miss_stall_${coreParams.HartId}", ExcitingUtils.Perf, true)
769  ExcitingUtils.addSource(rob_head_vio_replay, s"load_vio_replay_stall_${coreParams.HartId}", ExcitingUtils.Perf, true)
770  ExcitingUtils.addSource(rob_head_mshrfull_replay, s"load_mshr_replay_stall_${coreParams.HartId}", ExcitingUtils.Perf, true)
771  // ExcitingUtils.addSource(rob_head_confilct_replay, s"load_l1_cache_stall_with_bank_conflict_${coreParams.HartId}", ExcitingUtils.Perf, true)
772  ExcitingUtils.addSource(rob_head_other_replay, s"rob_head_other_replay_${coreParams.HartId}", ExcitingUtils.Perf, true)
773  val perfValidCount = RegNext(PopCount(allocated))
774
775  //  perf cnt
776  val enqNumber               = PopCount(io.enq.map(enq => enq.fire && !enq.bits.isLoadReplay))
777  val deqNumber               = PopCount(io.replay.map(_.fire))
778  val deqBlockCount           = PopCount(io.replay.map(r => r.valid && !r.ready))
779  val replayTlbMissCount      = PopCount(io.enq.map(enq => enq.fire && !enq.bits.isLoadReplay && enq.bits.rep_info.cause(LoadReplayCauses.C_TM)))
780  val replayMemAmbCount       = PopCount(io.enq.map(enq => enq.fire && !enq.bits.isLoadReplay && enq.bits.rep_info.cause(LoadReplayCauses.C_MA)))
781  val replayNukeCount         = PopCount(io.enq.map(enq => enq.fire && !enq.bits.isLoadReplay && enq.bits.rep_info.cause(LoadReplayCauses.C_NK)))
782  val replayRARRejectCount    = PopCount(io.enq.map(enq => enq.fire && !enq.bits.isLoadReplay && enq.bits.rep_info.cause(LoadReplayCauses.C_RAR)))
783  val replayRAWRejectCount    = PopCount(io.enq.map(enq => enq.fire && !enq.bits.isLoadReplay && enq.bits.rep_info.cause(LoadReplayCauses.C_RAW)))
784  val replayBankConflictCount = PopCount(io.enq.map(enq => enq.fire && !enq.bits.isLoadReplay && enq.bits.rep_info.cause(LoadReplayCauses.C_BC)))
785  val replayDCacheReplayCount = PopCount(io.enq.map(enq => enq.fire && !enq.bits.isLoadReplay && enq.bits.rep_info.cause(LoadReplayCauses.C_DR)))
786  val replayForwardFailCount  = PopCount(io.enq.map(enq => enq.fire && !enq.bits.isLoadReplay && enq.bits.rep_info.cause(LoadReplayCauses.C_FF)))
787  val replayDCacheMissCount   = PopCount(io.enq.map(enq => enq.fire && !enq.bits.isLoadReplay && enq.bits.rep_info.cause(LoadReplayCauses.C_DM)))
788  XSPerfAccumulate("enq", enqNumber)
789  XSPerfAccumulate("deq", deqNumber)
790  XSPerfAccumulate("deq_block", deqBlockCount)
791  XSPerfAccumulate("replay_full", io.lqFull)
792  XSPerfAccumulate("replay_rar_nack", replayRARRejectCount)
793  XSPerfAccumulate("replay_raw_nack", replayRAWRejectCount)
794  XSPerfAccumulate("replay_nuke", replayNukeCount)
795  XSPerfAccumulate("replay_mem_amb", replayMemAmbCount)
796  XSPerfAccumulate("replay_tlb_miss", replayTlbMissCount)
797  XSPerfAccumulate("replay_bank_conflict", replayBankConflictCount)
798  XSPerfAccumulate("replay_dcache_replay", replayDCacheReplayCount)
799  XSPerfAccumulate("replay_forward_fail", replayForwardFailCount)
800  XSPerfAccumulate("replay_dcache_miss", replayDCacheMissCount)
801  XSPerfAccumulate("replay_hint_wakeup", s0_hintSelValid)
802
803  val perfEvents: Seq[(String, UInt)] = Seq(
804    ("enq", enqNumber),
805    ("deq", deqNumber),
806    ("deq_block", deqBlockCount),
807    ("replay_full", io.lqFull),
808    ("replay_rar_nack", replayRARRejectCount),
809    ("replay_raw_nack", replayRAWRejectCount),
810    ("replay_nuke", replayNukeCount),
811    ("replay_mem_amb", replayMemAmbCount),
812    ("replay_tlb_miss", replayTlbMissCount),
813    ("replay_bank_conflict", replayBankConflictCount),
814    ("replay_dcache_replay", replayDCacheReplayCount),
815    ("replay_forward_fail", replayForwardFailCount),
816    ("replay_dcache_miss", replayDCacheMissCount),
817  )
818  generatePerfEvent()
819  // end
820}
821