xref: /XiangShan/src/main/scala/xiangshan/mem/lsqueue/LoadQueue.scala (revision 5da19fb3f5e30e8e3654dcd8ba1fefc3f257bb3a)
1/***************************************************************************************
2* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
3* Copyright (c) 2020-2021 Peng Cheng Laboratory
4*
5* XiangShan is licensed under Mulan PSL v2.
6* You can use this software according to the terms and conditions of the Mulan PSL v2.
7* You may obtain a copy of Mulan PSL v2 at:
8*          http://license.coscl.org.cn/MulanPSL2
9*
10* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
11* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
12* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
13*
14* See the Mulan PSL v2 for more details.
15***************************************************************************************/
16
17package xiangshan.mem
18
19import chipsalliance.rocketchip.config.Parameters
20import chisel3._
21import chisel3.util._
22import utils._
23import xiangshan._
24import xiangshan.backend.fu.fpu.FPU
25import xiangshan.backend.rob.RobLsqIO
26import xiangshan.cache._
27import xiangshan.frontend.FtqPtr
28import xiangshan.ExceptionNO._
29import chisel3.ExcitingUtils
30
31class LqPtr(implicit p: Parameters) extends CircularQueuePtr[LqPtr](
32  p => p(XSCoreParamsKey).LoadQueueSize
33){
34}
35
36object LqPtr {
37  def apply(f: Bool, v: UInt)(implicit p: Parameters): LqPtr = {
38    val ptr = Wire(new LqPtr)
39    ptr.flag := f
40    ptr.value := v
41    ptr
42  }
43}
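
// Illustrative note (not part of the original source): LqPtr is a CircularQueuePtr, i.e. a
// {flag, value} pair where `value` indexes the LoadQueueSize-entry queue and `flag` toggles on
// every wrap-around. Age helpers such as isAfter() and distanceBetween() used below rely on the
// flag to tell apart pointers that share a value but are one lap apart. For example, assuming
// LoadQueueSize = 64: enq = {flag 1, value 3}, deq = {flag 0, value 60} gives
// distanceBetween(enq, deq) = 64 - 60 + 3 = 7 valid entries.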
44
45trait HasLoadHelper { this: XSModule =>
46  def rdataHelper(uop: MicroOp, rdata: UInt): UInt = {
47    val fpWen = uop.ctrl.fpWen
48    LookupTree(uop.ctrl.fuOpType, List(
49      LSUOpType.lb   -> SignExt(rdata(7, 0) , XLEN),
50      LSUOpType.lh   -> SignExt(rdata(15, 0), XLEN),
51      /*
52          riscv-spec-20191213: 12.2 NaN Boxing of Narrower Values
53          Any operation that writes a narrower result to an f register must write
54          all 1s to the uppermost FLEN−n bits to yield a legal NaN-boxed value.
55      */
56      LSUOpType.lw   -> Mux(fpWen, FPU.box(rdata, FPU.S), SignExt(rdata(31, 0), XLEN)),
57      LSUOpType.ld   -> Mux(fpWen, FPU.box(rdata, FPU.D), SignExt(rdata(63, 0), XLEN)),
58      LSUOpType.lbu  -> ZeroExt(rdata(7, 0) , XLEN),
59      LSUOpType.lhu  -> ZeroExt(rdata(15, 0), XLEN),
60      LSUOpType.lwu  -> ZeroExt(rdata(31, 0), XLEN),
61    ))
62  }
63}
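
// Illustrative note (not part of the original source): NaN-boxing example for rdataHelper,
// assuming FLEN = 64. A single-precision load (LSUOpType.lw with fpWen set) that reads
// 0x3f800000 (1.0f) is expected to be boxed by FPU.box(rdata, FPU.S) into
// 0xffffffff_3f800000, i.e. the upper FLEN-32 bits are all 1s. Without the boxing the value
// would not be a legal NaN-boxed single and consumers would treat it as the canonical NaN.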
64
65class LqEnqIO(implicit p: Parameters) extends XSBundle {
66  val canAccept = Output(Bool())
67  val sqCanAccept = Input(Bool())
68  val needAlloc = Vec(exuParameters.LsExuCnt, Input(Bool()))
69  val req = Vec(exuParameters.LsExuCnt, Flipped(ValidIO(new MicroOp)))
70  val resp = Vec(exuParameters.LsExuCnt, Output(new LqPtr))
71}
72
73class LqPaddrWriteBundle(implicit p: Parameters) extends XSBundle {
74  val paddr = Output(UInt(PAddrBits.W))
75  val lqIdx = Output(new LqPtr)
76}
77
78class LqTriggerIO(implicit p: Parameters) extends XSBundle {
79  val hitLoadAddrTriggerHitVec = Input(Vec(3, Bool()))
80  val lqLoadAddrTriggerHitVec = Output(Vec(3, Bool()))
81}
82
83// Load Queue
84class LoadQueue(implicit p: Parameters) extends XSModule
85  with HasDCacheParameters
86  with HasCircularQueuePtrHelper
87  with HasLoadHelper
88  with HasPerfEvents
89{
90  val io = IO(new Bundle() {
91    val enq = new LqEnqIO
92    val brqRedirect = Flipped(ValidIO(new Redirect))
93    val loadPaddrIn = Vec(LoadPipelineWidth, Flipped(Valid(new LqPaddrWriteBundle)))
94    val loadIn = Vec(LoadPipelineWidth, Flipped(Valid(new LqWriteBundle)))
95    val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle)))
96    val s2_load_data_forwarded = Vec(LoadPipelineWidth, Input(Bool()))
97    val s3_delayed_load_error = Vec(LoadPipelineWidth, Input(Bool()))
98    val s2_dcache_require_replay = Vec(LoadPipelineWidth, Input(Bool()))
99    val s3_replay_from_fetch = Vec(LoadPipelineWidth, Input(Bool()))
100    val ldout = Vec(LoadPipelineWidth, DecoupledIO(new ExuOutput)) // writeback int load
101    val ldRawDataOut = Vec(LoadPipelineWidth, Output(new LoadDataFromLQBundle))
102    val load_s1 = Vec(LoadPipelineWidth, Flipped(new PipeLoadForwardQueryIO)) // TODO: to be renamed
103    val loadViolationQuery = Vec(LoadPipelineWidth, Flipped(new LoadViolationQueryIO))
104    val rob = Flipped(new RobLsqIO)
105    val rollback = Output(Valid(new Redirect)) // replay now starts from load instead of store
106    val refill = Flipped(ValidIO(new Refill))
107    val release = Flipped(ValidIO(new Release))
108    val uncache = new UncacheWordIO
109    val exceptionAddr = new ExceptionAddrIO
110    val lqFull = Output(Bool())
111    val lqCancelCnt = Output(UInt(log2Up(LoadQueueSize + 1).W))
112    val trigger = Vec(LoadPipelineWidth, new LqTriggerIO)
113  })
114
115  println("LoadQueue: size:" + LoadQueueSize)
116
117  val uop = Reg(Vec(LoadQueueSize, new MicroOp))
118  // val data = Reg(Vec(LoadQueueSize, new LsRobEntry))
119  val dataModule = Module(new LoadQueueDataWrapper(LoadQueueSize, wbNumRead = LoadPipelineWidth, wbNumWrite = LoadPipelineWidth))
120  dataModule.io := DontCare
121  val vaddrModule = Module(new SyncDataModuleTemplate(UInt(VAddrBits.W), LoadQueueSize, numRead = LoadPipelineWidth + 1, numWrite = LoadPipelineWidth))
122  vaddrModule.io := DontCare
123  val vaddrTriggerResultModule = Module(new SyncDataModuleTemplate(Vec(3, Bool()), LoadQueueSize, numRead = LoadPipelineWidth, numWrite = LoadPipelineWidth))
124  vaddrTriggerResultModule.io := DontCare
125  val allocated = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // lq entry has been allocated
126  val datavalid = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // data is valid
127  val writebacked = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // inst has been written back to CDB
128  val released = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // load data has been released by dcache
129  val error = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // load data has been corrupted
130  val miss = Reg(Vec(LoadQueueSize, Bool())) // load inst missed, waiting for miss queue to accept miss request
131  // val listening = Reg(Vec(LoadQueueSize, Bool())) // waiting for refill result
132  val pending = Reg(Vec(LoadQueueSize, Bool())) // mmio pending: inst is an mmio inst; it will not be executed until it reaches the head of rob
133  val refilling = WireInit(VecInit(List.fill(LoadQueueSize)(false.B))) // entry is being woken up by a dcache refill in this cycle
134
135  val debug_mmio = Reg(Vec(LoadQueueSize, Bool())) // mmio: inst is an mmio inst
136  val debug_paddr = Reg(Vec(LoadQueueSize, UInt(PAddrBits.W))) // paddr: physical address of the load, for debug
137
138  val enqPtrExt = RegInit(VecInit((0 until io.enq.req.length).map(_.U.asTypeOf(new LqPtr))))
139  val deqPtrExt = RegInit(0.U.asTypeOf(new LqPtr))
140  val deqPtrExtNext = Wire(new LqPtr)
141
142  val enqPtr = enqPtrExt(0).value
143  val deqPtr = deqPtrExt.value
144
145  val validCount = distanceBetween(enqPtrExt(0), deqPtrExt)
146  val allowEnqueue = validCount <= (LoadQueueSize - LoadPipelineWidth).U
147
148  val deqMask = UIntToMask(deqPtr, LoadQueueSize)
149  val enqMask = UIntToMask(enqPtr, LoadQueueSize)
150
151  val commitCount = RegNext(io.rob.lcommit)
152
153  val release1cycle = io.release
154  val release2cycle = RegNext(io.release)
155  val release2cycle_dup_lsu = RegNext(io.release)
156
157  /**
158    * Enqueue at dispatch
159    *
160    * Currently, LoadQueue only allows enqueue when #emptyEntries > EnqWidth
161    */
162  io.enq.canAccept := allowEnqueue
163
164  val canEnqueue = io.enq.req.map(_.valid)
165  val enqCancel = io.enq.req.map(_.bits.robIdx.needFlush(io.brqRedirect))
166  for (i <- 0 until io.enq.req.length) {
167    val offset = if (i == 0) 0.U else PopCount(io.enq.needAlloc.take(i))
168    val lqIdx = enqPtrExt(offset)
169    val index = io.enq.req(i).bits.lqIdx.value
170    when (canEnqueue(i) && !enqCancel(i)) {
171      uop(index).robIdx := io.enq.req(i).bits.robIdx
172      allocated(index) := true.B
173      datavalid(index) := false.B
174      writebacked(index) := false.B
175      released(index) := false.B
176      miss(index) := false.B
177      pending(index) := false.B
178      error(index) := false.B
179      XSError(!io.enq.canAccept || !io.enq.sqCanAccept, s"must accept $i\n")
180      XSError(index =/= lqIdx.value, s"must be the same entry $i\n")
181    }
182    io.enq.resp(i) := lqIdx
183  }
184  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n")
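
  // Illustrative note (not part of the original source): enqueue port i claims the enqPtrExt
  // slot equal to the number of ports before it that need an lq entry. For example, with
  // needAlloc = (1, 0, 1, 0), port 0 uses enqPtrExt(0) and port 2 uses enqPtrExt(1), while
  // ports 1 and 3 claim no lq entry. The XSError above cross-checks the locally computed slot
  // against the lqIdx that was assigned at dispatch.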
185
186  /**
187    * Writeback load from load units
188    *
189    * Most load instructions write back to the regfile at the same time as they write to the load queue.
190    * However,
191    *   (1) For an mmio instruction with exceptions, it writes back to the ROB immediately.
192    *   (2) For an mmio instruction without exceptions, it does not write back.
193    *       The mmio instruction will be sent to the lower level when it reaches ROB's head.
194    *       After the uncache response, it writes back through the arbiter shared with loadUnit.
195    *   (3) For a cache miss, it is marked as miss and sent to dcache later.
196    *       After the cache refill, it writes back through the arbiter shared with loadUnit.
197    */
198  for (i <- 0 until LoadPipelineWidth) {
199    dataModule.io.wb.wen(i) := false.B
200    dataModule.io.paddr.wen(i) := false.B
201    vaddrTriggerResultModule.io.wen(i) := false.B
202    val loadWbIndex = io.loadIn(i).bits.uop.lqIdx.value
203
204    // most lq status bits need to be updated immediately after the load writes back to the lq
205    // flag bits in the lq need to be updated accurately
206    when(io.loadIn(i).fire()) {
207      when(io.loadIn(i).bits.miss) {
208        XSInfo(io.loadIn(i).valid, "load miss write to lq idx %d pc 0x%x vaddr %x paddr %x mask %x forwardData %x forwardMask: %x mmio %x\n",
209          io.loadIn(i).bits.uop.lqIdx.asUInt,
210          io.loadIn(i).bits.uop.cf.pc,
211          io.loadIn(i).bits.vaddr,
212          io.loadIn(i).bits.paddr,
213          io.loadIn(i).bits.mask,
214          io.loadIn(i).bits.forwardData.asUInt,
215          io.loadIn(i).bits.forwardMask.asUInt,
216          io.loadIn(i).bits.mmio
217        )
218      }.otherwise {
219        XSInfo(io.loadIn(i).valid, "load hit write to cdb lqidx %d pc 0x%x vaddr %x paddr %x mask %x forwardData %x forwardMask: %x mmio %x\n",
220        io.loadIn(i).bits.uop.lqIdx.asUInt,
221        io.loadIn(i).bits.uop.cf.pc,
222        io.loadIn(i).bits.vaddr,
223        io.loadIn(i).bits.paddr,
224        io.loadIn(i).bits.mask,
225        io.loadIn(i).bits.forwardData.asUInt,
226        io.loadIn(i).bits.forwardMask.asUInt,
227        io.loadIn(i).bits.mmio
228      )}
229      if(EnableFastForward){
230        datavalid(loadWbIndex) := (!io.loadIn(i).bits.miss || io.s2_load_data_forwarded(i)) &&
231          !io.loadIn(i).bits.mmio && // mmio data is not valid until the uncache access finishes
232          !io.s2_dcache_require_replay(i) // do not write back if that inst will be resent from rs
233      } else {
234        datavalid(loadWbIndex) := (!io.loadIn(i).bits.miss || io.s2_load_data_forwarded(i)) &&
235          !io.loadIn(i).bits.mmio // mmio data is not valid until the uncache access finishes
236      }
237      writebacked(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
238
239      debug_mmio(loadWbIndex) := io.loadIn(i).bits.mmio
240      debug_paddr(loadWbIndex) := io.loadIn(i).bits.paddr
241
242      val dcacheMissed = io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
243      if(EnableFastForward){
244        miss(loadWbIndex) := dcacheMissed && !io.s2_load_data_forwarded(i) && !io.s2_dcache_require_replay(i)
245      } else {
246        miss(loadWbIndex) := dcacheMissed && !io.s2_load_data_forwarded(i)
247      }
248      pending(loadWbIndex) := io.loadIn(i).bits.mmio
249      released(loadWbIndex) := release2cycle.valid &&
250        io.loadIn(i).bits.paddr(PAddrBits-1, DCacheLineOffset) === release2cycle.bits.paddr(PAddrBits-1, DCacheLineOffset) ||
251        release1cycle.valid &&
252        io.loadIn(i).bits.paddr(PAddrBits-1, DCacheLineOffset) === release1cycle.bits.paddr(PAddrBits-1, DCacheLineOffset)
253    }
254
255    // data bit in lq can be updated when load_s2 valid
256    // when(io.loadIn(i).bits.lq_data_wen){
257    //   val loadWbData = Wire(new LQDataEntry)
258    //   loadWbData.paddr := io.loadIn(i).bits.paddr
259    //   loadWbData.mask := io.loadIn(i).bits.mask
260    //   loadWbData.data := io.loadIn(i).bits.forwardData.asUInt // fwd data
261    //   loadWbData.fwdMask := io.loadIn(i).bits.forwardMask
262    //   dataModule.io.wbWrite(i, loadWbIndex, loadWbData)
263    //   dataModule.io.wb.wen(i) := true.B
264
265    //   // dirty code for load instr
266    //   uop(loadWbIndex).pdest := io.loadIn(i).bits.uop.pdest
267    //   uop(loadWbIndex).cf := io.loadIn(i).bits.uop.cf
268    //   uop(loadWbIndex).ctrl := io.loadIn(i).bits.uop.ctrl
269    //   uop(loadWbIndex).debugInfo := io.loadIn(i).bits.uop.debugInfo
270
271    //   vaddrTriggerResultModule.io.waddr(i) := loadWbIndex
272    //   vaddrTriggerResultModule.io.wdata(i) := io.trigger(i).hitLoadAddrTriggerHitVec
273
274    //   vaddrTriggerResultModule.io.wen(i) := true.B
275    // }
276
277    // dirty code to reduce load_s2.valid fanout
278    when(io.loadIn(i).bits.lq_data_wen_dup(0)){
279      val loadWbData = Wire(new LQDataEntry)
280      loadWbData.paddr := io.loadIn(i).bits.paddr
281      loadWbData.mask := io.loadIn(i).bits.mask
282      loadWbData.data := io.loadIn(i).bits.forwardData.asUInt // fwd data
283      loadWbData.fwdMask := io.loadIn(i).bits.forwardMask
284      dataModule.io.wbWrite(i, loadWbIndex, loadWbData)
285      dataModule.io.wb.wen(i) := true.B
286    }
287    // dirty code for load instr
288    when(io.loadIn(i).bits.lq_data_wen_dup(1)){
289      uop(loadWbIndex).pdest := io.loadIn(i).bits.uop.pdest
290    }
291    when(io.loadIn(i).bits.lq_data_wen_dup(2)){
292      uop(loadWbIndex).cf := io.loadIn(i).bits.uop.cf
293    }
294    when(io.loadIn(i).bits.lq_data_wen_dup(3)){
295      uop(loadWbIndex).ctrl := io.loadIn(i).bits.uop.ctrl
296    }
297    when(io.loadIn(i).bits.lq_data_wen_dup(4)){
298      uop(loadWbIndex).debugInfo := io.loadIn(i).bits.uop.debugInfo
299    }
300    when(io.loadIn(i).bits.lq_data_wen_dup(5)){
301      vaddrTriggerResultModule.io.waddr(i) := loadWbIndex
302      vaddrTriggerResultModule.io.wdata(i) := io.trigger(i).hitLoadAddrTriggerHitVec
303      vaddrTriggerResultModule.io.wen(i) := true.B
304    }
305
306    when(io.loadPaddrIn(i).valid) {
307      dataModule.io.paddr.wen(i) := true.B
308      dataModule.io.paddr.waddr(i) := io.loadPaddrIn(i).bits.lqIdx.value
309      dataModule.io.paddr.wdata(i) := io.loadPaddrIn(i).bits.paddr
310    }
311
312    // vaddrModule write is delayed, as vaddrModule will not be read right after write
313    vaddrModule.io.waddr(i) := RegNext(loadWbIndex)
314    vaddrModule.io.wdata(i) := RegNext(io.loadIn(i).bits.vaddr)
315    vaddrModule.io.wen(i) := RegNext(io.loadIn(i).fire())
316  }
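
  // Illustrative note (not part of the original source): lq_data_wen_dup(0..5) are duplicated
  // copies of the same load_s2 write enable, one per register group written above (data entry,
  // pdest, cf, ctrl, debugInfo, trigger result). Splitting a single high-fanout valid into
  // several physical copies keeps each copy's fanout small while the behavior stays the same as
  // gating every write with one load_s2 valid.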
317
318  when(io.refill.valid) {
319    XSDebug("miss resp: paddr:0x%x data %x\n", io.refill.bits.addr, io.refill.bits.data)
320  }
321
322  // Refill 64 bit in a cycle
323  // Refill data comes back from io.dcache.resp
324  dataModule.io.refill.valid := io.refill.valid
325  dataModule.io.refill.paddr := io.refill.bits.addr
326  dataModule.io.refill.data := io.refill.bits.data
327
328  val s2_dcache_require_replay = WireInit(VecInit((0 until LoadPipelineWidth).map(i =>{
329    RegNext(io.loadIn(i).fire()) && RegNext(io.s2_dcache_require_replay(i))
330  })))
331  dontTouch(s2_dcache_require_replay)
332
333  (0 until LoadQueueSize).map(i => {
334    dataModule.io.refill.refillMask(i) := allocated(i) && miss(i)
335    when(dataModule.io.refill.valid && dataModule.io.refill.refillMask(i) && dataModule.io.refill.matchMask(i)) {
336      datavalid(i) := true.B
337      miss(i) := false.B
338      when(!s2_dcache_require_replay.asUInt.orR){
339        refilling(i) := true.B
340      }
341      when(io.refill.bits.error) {
342        error(i) := true.B
343      }
344    }
345  })
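
  // Illustrative note (not part of the original source): on a refill, entry i moves from miss to
  // datavalid when it is waiting on a miss (refillMask) and its recorded paddr falls in the
  // refilled cacheline (matchMask). refilling(i) is only raised when no s2_dcache_require_replay
  // is pending this cycle, matching the replay handling in the per-pipeline loop below, and
  // io.refill.bits.error additionally marks the entry as corrupted.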
346
347  for (i <- 0 until LoadPipelineWidth) {
348    val loadWbIndex = io.loadIn(i).bits.uop.lqIdx.value
349    val lastCycleLoadWbIndex = RegNext(loadWbIndex)
350    // update miss state in load s3
351    if(!EnableFastForward){
352      // s2_dcache_require_replay will be used to update lq flags 1 cycle later for better timing
353      //
354      // io.s2_dcache_require_replay comes from dcache miss req reject, which is quite slow to generate
355      when(s2_dcache_require_replay(i)) {
356        // do not write back if that inst will be resent from rs
357        // rob writeback will not be triggered by a refill before inst replay
358        miss(lastCycleLoadWbIndex) := false.B // disable refill listening
359        datavalid(lastCycleLoadWbIndex) := false.B // disable refill listening
360        assert(!datavalid(lastCycleLoadWbIndex))
361      }
362    }
363    // update load error state in load s3
364    when(RegNext(io.loadIn(i).fire()) && io.s3_delayed_load_error(i)){
365      uop(lastCycleLoadWbIndex).cf.exceptionVec(loadAccessFault) := true.B
366    }
367    // update inst replay from fetch flag in s3
368    when(RegNext(io.loadIn(i).fire()) && io.s3_replay_from_fetch(i)){
369      uop(lastCycleLoadWbIndex).ctrl.replayInst := true.B
370    }
371  }
372
373
374  // Write back up to 2 missed load insts to the CDB
375  //
376  // Pick 2 missed loads (whose data has been refilled) and write them back to the CDB.
377  // The 2 refilled loads are selected from the even/odd entries separately.
378
379  // Stage 0
380  // Generate writeback indexes
381
382  def getRemBits(input: UInt)(rem: Int): UInt = {
383    VecInit((0 until LoadQueueSize / LoadPipelineWidth).map(i => { input(LoadPipelineWidth * i + rem) })).asUInt
384  }
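
  // Illustrative note (not part of the original source): getRemBits gathers every
  // LoadPipelineWidth-th bit of a LoadQueueSize-wide vector. Assuming LoadQueueSize = 8 and
  // LoadPipelineWidth = 2, getRemBits(x)(0) collects bits {0, 2, 4, 6} and getRemBits(x)(1)
  // collects bits {1, 3, 5, 7}, i.e. the queue is viewed as two interleaved banks, one per
  // writeback port.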
385
386  val loadWbSel = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W))) // index selected last cycle
387  val loadWbSelV = Wire(Vec(LoadPipelineWidth, Bool())) // index selected in last cycle is valid
388
389  val loadWbSelVec = VecInit((0 until LoadQueueSize).map(i => {
390    // allocated(i) && !writebacked(i) && (datavalid(i) || refilling(i))
391    allocated(i) && !writebacked(i) && datavalid(i) // querying refilling would cause bad timing
392  })).asUInt() // use UInt instead of Vec to reduce verilog lines
393  val remDeqMask = Seq.tabulate(LoadPipelineWidth)(getRemBits(deqMask)(_))
394  // generate lastCycleSelect mask
395  val remFireMask = Seq.tabulate(LoadPipelineWidth)(rem => getRemBits(UIntToOH(loadWbSel(rem)))(rem))
396  // generate real select vec
397  def toVec(a: UInt): Vec[Bool] = {
398    VecInit(a.asBools)
399  }
400  val loadRemSelVecFire = Seq.tabulate(LoadPipelineWidth)(rem => getRemBits(loadWbSelVec)(rem) & ~remFireMask(rem))
401  val loadRemSelVecNotFire = Seq.tabulate(LoadPipelineWidth)(getRemBits(loadWbSelVec)(_))
402  val loadRemSel = Seq.tabulate(LoadPipelineWidth)(rem => Mux(
403    io.ldout(rem).fire(),
404    getFirstOne(toVec(loadRemSelVecFire(rem)), remDeqMask(rem)),
405    getFirstOne(toVec(loadRemSelVecNotFire(rem)), remDeqMask(rem))
406  ))
407
408
409  val loadWbSelGen = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W)))
410  val loadWbSelVGen = Wire(Vec(LoadPipelineWidth, Bool()))
411  (0 until LoadPipelineWidth).foreach(index => {
412    loadWbSelGen(index) := (
413      if (LoadPipelineWidth > 1) Cat(loadRemSel(index), index.U(log2Ceil(LoadPipelineWidth).W))
414      else loadRemSel(index)
415    )
416    loadWbSelVGen(index) := Mux(io.ldout(index).fire, loadRemSelVecFire(index).asUInt.orR, loadRemSelVecNotFire(index).asUInt.orR)
417  })
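
  // Illustrative note (not part of the original source): loadRemSel(index) is a position inside
  // bank `index`, so Cat(loadRemSel(index), index) rebuilds the full queue index; continuing the
  // 8-entry / 2-port example above, position 3 in bank 1 maps to entry 3 * 2 + 1 = 7. The entry
  // selected last cycle is excluded via remFireMask only if its ldout port actually fired, so a
  // blocked writeback keeps competing in the next cycle.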
418
419  (0 until LoadPipelineWidth).map(i => {
420    loadWbSel(i) := RegNext(loadWbSelGen(i))
421    loadWbSelV(i) := RegNext(loadWbSelVGen(i), init = false.B)
422    when(io.ldout(i).fire()){
423      // Mark them as writebacked, so they will not be selected in the next cycle
424      writebacked(loadWbSel(i)) := true.B
425    }
426  })
427
428  // Stage 1
429  // Use indexes generated in cycle 0 to read data
430  // writeback data to cdb
431  (0 until LoadPipelineWidth).map(i => {
432    // data select
433    dataModule.io.wb.raddr(i) := loadWbSelGen(i)
434    val rdata = dataModule.io.wb.rdata(i).data
435    val seluop = uop(loadWbSel(i))
436    val func = seluop.ctrl.fuOpType
437    val raddr = dataModule.io.wb.rdata(i).paddr
438    val rdataSel = LookupTree(raddr(2, 0), List(
439      "b000".U -> rdata(63, 0),
440      "b001".U -> rdata(63, 8),
441      "b010".U -> rdata(63, 16),
442      "b011".U -> rdata(63, 24),
443      "b100".U -> rdata(63, 32),
444      "b101".U -> rdata(63, 40),
445      "b110".U -> rdata(63, 48),
446      "b111".U -> rdata(63, 56)
447    ))
448    val rdataPartialLoad = rdataHelper(seluop, rdataSel)
449
450    // writeback missed int/fp load
451    //
452    // Int load writeback will finish (if not blocked) in one cycle
453    io.ldout(i).bits.uop := seluop
454    io.ldout(i).bits.uop.lqIdx := loadWbSel(i).asTypeOf(new LqPtr)
455    io.ldout(i).bits.data := rdataPartialLoad // not used
456    io.ldout(i).bits.redirectValid := false.B
457    io.ldout(i).bits.redirect := DontCare
458    io.ldout(i).bits.debug.isMMIO := debug_mmio(loadWbSel(i))
459    io.ldout(i).bits.debug.isPerfCnt := false.B
460    io.ldout(i).bits.debug.paddr := debug_paddr(loadWbSel(i))
461    io.ldout(i).bits.debug.vaddr := vaddrModule.io.rdata(i+1)
462    io.ldout(i).bits.fflags := DontCare
463    io.ldout(i).valid := loadWbSelV(i)
464
465    // merged data, uop and offset for data sel in load_s3
466    io.ldRawDataOut(i).lqData := dataModule.io.wb.rdata(i).data
467    io.ldRawDataOut(i).uop := io.ldout(i).bits.uop
468    io.ldRawDataOut(i).addrOffset := dataModule.io.wb.rdata(i).paddr
469
470    when(io.ldout(i).fire()) {
471      XSInfo("int load miss write to cdb robidx %d lqidx %d pc 0x%x mmio %x\n",
472        io.ldout(i).bits.uop.robIdx.asUInt,
473        io.ldout(i).bits.uop.lqIdx.asUInt,
474        io.ldout(i).bits.uop.cf.pc,
475        debug_mmio(loadWbSel(i))
476      )
477    }
478
479  })
480
481  /**
482    * Load commits
483    *
484    * When a load is committed, mark it as !allocated and move deqPtrExt forward.
485    */
486  (0 until CommitWidth).map(i => {
487    when(commitCount > i.U){
488      allocated((deqPtrExt+i.U).value) := false.B
489      XSError(!allocated((deqPtrExt+i.U).value), s"why commit invalid entry $i?\n")
490    }
491  })
492
493  def getFirstOne(mask: Vec[Bool], startMask: UInt) = {
494    val length = mask.length
495    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
496    val highBitsUint = Cat(highBits.reverse)
497    PriorityEncoder(Mux(highBitsUint.orR(), highBitsUint, mask.asUInt))
498  }
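
  // Illustrative note (not part of the original source): getFirstOne is a wrap-around priority
  // select. Bits at or above the start position (mask & ~startMask) win first; only if that part
  // is empty does it fall back to the wrapped-around bits below the start position. For example,
  // with mask = 0b00100110 and startMask = UIntToMask(3, 8) = 0b00000111, the high part is
  // 0b00100000 and index 5 is chosen; with mask = 0b00000110 the high part is empty and index 1
  // is chosen instead.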
499
500  def getOldest[T <: XSBundleWithMicroOp](valid: Seq[Bool], bits: Seq[T]): (Seq[Bool], Seq[T]) = {
501    assert(valid.length == bits.length)
502    assert(isPow2(valid.length))
503    if (valid.length == 1) {
504      (valid, bits)
505    } else if (valid.length == 2) {
506      val res = Seq.fill(2)(Wire(ValidIO(chiselTypeOf(bits(0)))))
507      for (i <- res.indices) {
508        res(i).valid := valid(i)
509        res(i).bits := bits(i)
510      }
511      val oldest = Mux(valid(0) && valid(1), Mux(isAfter(bits(0).uop.robIdx, bits(1).uop.robIdx), res(1), res(0)), Mux(valid(0) && !valid(1), res(0), res(1)))
512      (Seq(oldest.valid), Seq(oldest.bits))
513    } else {
514      val left = getOldest(valid.take(valid.length / 2), bits.take(valid.length / 2))
515      val right = getOldest(valid.takeRight(valid.length / 2), bits.takeRight(valid.length / 2))
516      getOldest(left._1 ++ right._1, left._2 ++ right._2)
517    }
518  }
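
  // Illustrative note (not part of the original source): getOldest reduces a power-of-two set of
  // (valid, bits) pairs as a tournament tree. Each 2-way round keeps the entry with the older
  // robIdx (or the only valid one), so 4 inputs need two levels of isAfter comparators instead of
  // a full ordered sort.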
519
520  def getAfterMask(valid: Seq[Bool], uop: Seq[MicroOp]) = {
521    assert(valid.length == uop.length)
522    val length = valid.length
523    (0 until length).map(i => {
524      (0 until length).map(j => {
525        Mux(valid(i) && valid(j),
526          isAfter(uop(i).robIdx, uop(j).robIdx),
527          Mux(!valid(i), true.B, false.B))
528      })
529    })
530  }
531
532  /**
533    * Store-Load Memory violation detection
534    *
535    * When a store writes back, it searches the LoadQueue for younger load instructions
536    * with the same physical address. They loaded wrong data and need re-execution.
537    *
538    * Cycle 0: Store Writeback
539    *   Generate match vector for store address with rangeMask(stPtr, enqPtr).
540    *   Besides, load instructions in LoadUnit_S1 and S2 are also checked.
541    * Cycle 1: Redirect Generation
542    *   There are three possible types of violations and up to 6 possible redirect requests.
543    *   Choose the oldest load (part 1). (4 + 2) -> (1 + 2)
544    * Cycle 2: Redirect Fire
545    *   Choose the oldest load (part 2). (3 -> 1)
546    *   Prepare redirect request according to the detected violation.
547    *   Fire redirect request (if valid)
548    */
549
550  // stage 0:        lq l1 wb     l1 wb lq
551  //                 |  |  |      |  |  |  (paddr match)
552  // stage 1:        lq l1 wb     l1 wb lq
553  //                 |  |  |      |  |  |
554  //                 |  |------------|  |
555  //                 |        |         |
556  // stage 2:        lq      l1wb       lq
557  //                 |        |         |
558  //                 --------------------
559  //                          |
560  //                      rollback req
561  io.load_s1 := DontCare
562  def detectRollback(i: Int) = {
563    val startIndex = io.storeIn(i).bits.uop.lqIdx.value
564    val lqIdxMask = UIntToMask(startIndex, LoadQueueSize)
565    val xorMask = lqIdxMask ^ enqMask
566    val sameFlag = io.storeIn(i).bits.uop.lqIdx.flag === enqPtrExt(0).flag
567    val stToEnqPtrMask = Mux(sameFlag, xorMask, ~xorMask)
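
    // Illustrative note (not part of the original source): the xor of the two "bits below
    // pointer" masks selects exactly the entries allocated after the store, with the flag
    // comparison handling wrap-around. Assuming LoadQueueSize = 8, store lqIdx.value = 6 and
    // enqPtr = 2 on the next lap (flags differ): lqIdxMask = 0b00111111, enqMask = 0b00000011,
    // xorMask = 0b00111100, and ~xorMask = 0b11000011 selects entries {6, 7, 0, 1}, i.e. the
    // loads younger than the store.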
568
569    // check if a load already in the lq needs to be rolled back
570    dataModule.io.violation(i).paddr := io.storeIn(i).bits.paddr
571    dataModule.io.violation(i).mask := io.storeIn(i).bits.mask
572    val addrMaskMatch = RegNext(dataModule.io.violation(i).violationMask)
573    val entryNeedCheck = RegNext(VecInit((0 until LoadQueueSize).map(j => {
574      allocated(j) && stToEnqPtrMask(j) && (datavalid(j) || miss(j))
575    })))
576    val lqViolationVec = VecInit((0 until LoadQueueSize).map(j => {
577      addrMaskMatch(j) && entryNeedCheck(j)
578    }))
579    val lqViolation = lqViolationVec.asUInt().orR() && RegNext(!io.storeIn(i).bits.miss)
580    val lqViolationIndex = getFirstOne(lqViolationVec, RegNext(lqIdxMask))
581    val lqViolationUop = uop(lqViolationIndex)
582    // lqViolationUop.lqIdx.flag := deqMask(lqViolationIndex) ^ deqPtrExt.flag
583    // lqViolationUop.lqIdx.value := lqViolationIndex
584    XSDebug(lqViolation, p"${Binary(Cat(lqViolationVec))}, $startIndex, $lqViolationIndex\n")
585
586    // when a load and a store write back to the rob in the same cycle, check if rollback is needed
587    val wbViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
588      io.loadIn(j).valid &&
589      isAfter(io.loadIn(j).bits.uop.robIdx, io.storeIn(i).bits.uop.robIdx) &&
590      io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.loadIn(j).bits.paddr(PAddrBits - 1, 3) &&
591      (io.storeIn(i).bits.mask & io.loadIn(j).bits.mask).orR
592    })))
593    val wbViolation = wbViolationVec.asUInt().orR() && RegNext(io.storeIn(i).valid && !io.storeIn(i).bits.miss)
594    val wbViolationUop = getOldest(wbViolationVec, RegNext(VecInit(io.loadIn.map(_.bits))))._2(0).uop
595    XSDebug(wbViolation, p"${Binary(Cat(wbViolationVec))}, $wbViolationUop\n")
596
597    // check if rollback is needed for load in l1
598    val l1ViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
599      io.load_s1(j).valid && // L1 valid
600      isAfter(io.load_s1(j).uop.robIdx, io.storeIn(i).bits.uop.robIdx) &&
601      io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.load_s1(j).paddr(PAddrBits - 1, 3) &&
602      (io.storeIn(i).bits.mask & io.load_s1(j).mask).orR
603    })))
604    val l1Violation = l1ViolationVec.asUInt().orR() && RegNext(io.storeIn(i).valid && !io.storeIn(i).bits.miss)
605    val load_s1 = Wire(Vec(LoadPipelineWidth, new XSBundleWithMicroOp))
606    (0 until LoadPipelineWidth).foreach(i => load_s1(i).uop := io.load_s1(i).uop)
607    val l1ViolationUop = getOldest(l1ViolationVec, RegNext(load_s1))._2(0).uop
608    XSDebug(l1Violation, p"${Binary(Cat(l1ViolationVec))}, $l1ViolationUop\n")
609
610    XSDebug(
611      l1Violation,
612      "need rollback (l1 load) pc %x robidx %d target %x\n",
613      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.robIdx.asUInt, l1ViolationUop.robIdx.asUInt
614    )
615    XSDebug(
616      lqViolation,
617      "need rollback (ld wb before store) pc %x robidx %d target %x\n",
618      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.robIdx.asUInt, lqViolationUop.robIdx.asUInt
619    )
620    XSDebug(
621      wbViolation,
622      "need rollback (ld/st wb together) pc %x robidx %d target %x\n",
623      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.robIdx.asUInt, wbViolationUop.robIdx.asUInt
624    )
625
626    ((lqViolation, lqViolationUop), (wbViolation, wbViolationUop), (l1Violation, l1ViolationUop))
627  }
628
629  def rollbackSel(a: Valid[MicroOpRbExt], b: Valid[MicroOpRbExt]): ValidIO[MicroOpRbExt] = {
630    Mux(
631      a.valid,
632      Mux(
633        b.valid,
634        Mux(isAfter(a.bits.uop.robIdx, b.bits.uop.robIdx), b, a), // a,b both valid, sel oldest
635        a // sel a
636      ),
637      b // sel b
638    )
639  }
640  val lastCycleRedirect = RegNext(io.brqRedirect)
641  val lastlastCycleRedirect = RegNext(lastCycleRedirect)
642
643  // S2: select rollback (part1) and generate rollback request
644  // rollback check
645  // Wb/L1 rollback seq check is done in s2
646  val rollbackWb = Wire(Vec(StorePipelineWidth, Valid(new MicroOpRbExt)))
647  val rollbackL1 = Wire(Vec(StorePipelineWidth, Valid(new MicroOpRbExt)))
648  val rollbackL1Wb = Wire(Vec(StorePipelineWidth*2, Valid(new MicroOpRbExt)))
649  // Lq rollback seq check is done in s3 (next stage), as getting rollbackLq MicroOp is slow
650  val rollbackLq = Wire(Vec(StorePipelineWidth, Valid(new MicroOpRbExt)))
651  // store ftq index for store set update
652  val stFtqIdxS2 = Wire(Vec(StorePipelineWidth, new FtqPtr))
653  val stFtqOffsetS2 = Wire(Vec(StorePipelineWidth, UInt(log2Up(PredictWidth).W)))
654  for (i <- 0 until StorePipelineWidth) {
655    val detectedRollback = detectRollback(i)
656    rollbackLq(i).valid := detectedRollback._1._1 && RegNext(io.storeIn(i).valid)
657    rollbackLq(i).bits.uop := detectedRollback._1._2
658    rollbackLq(i).bits.flag := i.U
659    rollbackWb(i).valid := detectedRollback._2._1 && RegNext(io.storeIn(i).valid)
660    rollbackWb(i).bits.uop := detectedRollback._2._2
661    rollbackWb(i).bits.flag := i.U
662    rollbackL1(i).valid := detectedRollback._3._1 && RegNext(io.storeIn(i).valid)
663    rollbackL1(i).bits.uop := detectedRollback._3._2
664    rollbackL1(i).bits.flag := i.U
665    rollbackL1Wb(2*i) := rollbackL1(i)
666    rollbackL1Wb(2*i+1) := rollbackWb(i)
667    stFtqIdxS2(i) := RegNext(io.storeIn(i).bits.uop.cf.ftqPtr)
668    stFtqOffsetS2(i) := RegNext(io.storeIn(i).bits.uop.cf.ftqOffset)
669  }
670
671  val rollbackL1WbSelected = ParallelOperation(rollbackL1Wb, rollbackSel)
672  val rollbackL1WbVReg = RegNext(rollbackL1WbSelected.valid)
673  val rollbackL1WbReg = RegEnable(rollbackL1WbSelected.bits, rollbackL1WbSelected.valid)
674  val rollbackLqVReg = rollbackLq.map(x => RegNext(x.valid))
675  val rollbackLqReg = rollbackLq.map(x => RegEnable(x.bits, x.valid))
676
677  // S3: select rollback (part2), generate rollback request, then fire rollback request
678  // Note that we use robIdx - 1.U to flush the load instruction itself.
679  // Thus, if last cycle's robIdx equals this cycle's robIdx, it still triggers the redirect.
680
681  val rollbackValidVec = rollbackL1WbVReg +: rollbackLqVReg
682  val rollbackUopExtVec = rollbackL1WbReg +: rollbackLqReg
683
684  // select uop in parallel
685  val mask = getAfterMask(rollbackValidVec, rollbackUopExtVec.map(i => i.uop))
686  val lqs = getOldest(rollbackLqVReg, rollbackLqReg)
687  val rollbackUopExt = getOldest(lqs._1 :+ rollbackL1WbVReg, lqs._2 :+ rollbackL1WbReg)._2(0)
688  val stFtqIdxS3 = RegNext(stFtqIdxS2)
689  val stFtqOffsetS3 = RegNext(stFtqOffsetS2)
690  val rollbackUop = rollbackUopExt.uop
691  val rollbackStFtqIdx = stFtqIdxS3(rollbackUopExt.flag)
692  val rollbackStFtqOffset = stFtqOffsetS3(rollbackUopExt.flag)
693
694  // check if rollback request is still valid in parallel
695  val rollbackValidVecChecked = Wire(Vec(LoadPipelineWidth + 1, Bool()))
696  for(((v, uop), idx) <- rollbackValidVec.zip(rollbackUopExtVec.map(i => i.uop)).zipWithIndex) {
697    rollbackValidVecChecked(idx) := v &&
698      (!lastCycleRedirect.valid || isBefore(uop.robIdx, lastCycleRedirect.bits.robIdx)) &&
699      (!lastlastCycleRedirect.valid || isBefore(uop.robIdx, lastlastCycleRedirect.bits.robIdx))
700  }
701
702  io.rollback.bits.robIdx := rollbackUop.robIdx
703  io.rollback.bits.ftqIdx := rollbackUop.cf.ftqPtr
704  io.rollback.bits.stFtqIdx := rollbackStFtqIdx
705  io.rollback.bits.ftqOffset := rollbackUop.cf.ftqOffset
706  io.rollback.bits.stFtqOffset := rollbackStFtqOffset
707  io.rollback.bits.level := RedirectLevel.flush
708  io.rollback.bits.interrupt := DontCare
709  io.rollback.bits.cfiUpdate := DontCare
710  io.rollback.bits.cfiUpdate.target := rollbackUop.cf.pc
711  io.rollback.bits.debug_runahead_checkpoint_id := rollbackUop.debugInfo.runahead_checkpoint_id
712  // io.rollback.bits.pc := DontCare
713
714  io.rollback.valid := rollbackValidVecChecked.asUInt.orR
715
716  when(io.rollback.valid) {
717    // XSDebug("Mem rollback: pc %x robidx %d\n", io.rollback.bits.cfi, io.rollback.bits.robIdx.asUInt)
718  }
719
720  /**
721  * Load-Load Memory violation detection
722  *
723  * When a load arrives at load_s1, it searches the LoadQueue for younger load instructions
724  * with the same physical address. If a younger load has been released (or observed),
725  * the younger load needs to be re-executed.
726  *
727  * For now, if re-execution is found to be needed in load_s1, we mark the older load as replayInst;
728  * the two loads will be replayed when the older load becomes the head of the rob.
729  *
730  * When dcache releases a cacheline, mark all writebacked entries in the load queue with
731  * the same line paddr as released.
732  */
733
734  // Load-Load Memory violation query
735  val deqRightMask = UIntToMask.rightmask(deqPtr, LoadQueueSize)
736  (0 until LoadPipelineWidth).map(i => {
737    dataModule.io.release_violation(i).paddr := io.loadViolationQuery(i).req.bits.paddr
738    io.loadViolationQuery(i).req.ready := true.B
739    io.loadViolationQuery(i).resp.valid := RegNext(io.loadViolationQuery(i).req.fire())
740    // Generate real violation mask
741    // Note that we use UIntToMask.rightmask here
742    val startIndex = io.loadViolationQuery(i).req.bits.uop.lqIdx.value
743    val lqIdxMask = UIntToMask(startIndex, LoadQueueSize)
744    val xorMask = lqIdxMask ^ enqMask
745    val sameFlag = io.loadViolationQuery(i).req.bits.uop.lqIdx.flag === enqPtrExt(0).flag
746    val ldToEnqPtrMask = Mux(sameFlag, xorMask, ~xorMask)
747    val ldld_violation_mask_gen_1 = WireInit(VecInit((0 until LoadQueueSize).map(j => {
748      ldToEnqPtrMask(j) && // the load is younger than current load
749      allocated(j) && // entry is valid
750      released(j) && // cacheline is released
751      (datavalid(j) || miss(j)) // paddr is valid
752    })))
753    val ldld_violation_mask_gen_2 = WireInit(VecInit((0 until LoadQueueSize).map(j => {
754      dataModule.io.release_violation(i).match_mask(j)// addr match
755      // addr match result is slow to generate, we RegNext() it
756    })))
757    val ldld_violation_mask = RegNext(ldld_violation_mask_gen_1).asUInt & RegNext(ldld_violation_mask_gen_2).asUInt
758    dontTouch(ldld_violation_mask)
759    ldld_violation_mask.suggestName("ldldViolationMask_" + i)
760    io.loadViolationQuery(i).resp.bits.have_violation := ldld_violation_mask.orR
761  })
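
  // Illustrative note (not part of the original source): the ld-ld check is split across two
  // cycles for timing. The entry filter (younger, allocated, released, paddr valid) and the CAM
  // paddr match are both computed in the query cycle and registered, so the response in the next
  // cycle is only an AND of two registered vectors followed by an OR-reduction.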
762
763  // "released" flag update
764  //
765  // When io.release.valid (release1cycle.valid), it uses the last ld-ld paddr cam port to
766  // update release flag in 1 cycle
767
768  when(release1cycle.valid){
769    // Take over ld-ld paddr cam port
770    dataModule.io.release_violation.takeRight(1)(0).paddr := release1cycle.bits.paddr
771    io.loadViolationQuery.takeRight(1)(0).req.ready := false.B
772  }
773
774  when(release2cycle.valid){
775    // If a load comes in that cycle, we cannot judge whether it has an ld-ld violation,
776    // so we replay that load inst from RS
777    io.loadViolationQuery.map(i => i.req.ready :=
778      // use lsu side release2cycle_dup_lsu paddr for better timing
779      !(i.req.bits.paddr(PAddrBits-1, DCacheLineOffset) === release2cycle_dup_lsu.bits.paddr(PAddrBits-1, DCacheLineOffset))
780    )
781    // io.loadViolationQuery.map(i => i.req.ready := false.B) // For better timing
782  }
783
784  (0 until LoadQueueSize).map(i => {
785    when(RegNext(dataModule.io.release_violation.takeRight(1)(0).match_mask(i) &&
786      allocated(i) &&
787      datavalid(i) &&
788      release1cycle.valid
789    )){
790      // Note: if a load has missed in dcache and is waiting for refill in load queue,
791      // its released flag still needs to be set as true if addr matches.
792      released(i) := true.B
793    }
794  })
795
796  /**
797    * Memory mapped IO / other uncached operations
798    *
799    * States:
800    * (1) writeback from store units: mark as pending
801    * (2) when they reach ROB's head, they can be sent to uncache channel
802    * (3) response from uncache channel: mark as datavalid
803    * (4) writeback to ROB (and other units): mark as writebacked
804    * (5) ROB commits the instruction: same as normal instructions
805    */
806  //(2) when they reach ROB's head, they can be sent to uncache channel
807  val lqTailMmioPending = WireInit(pending(deqPtr))
808  val lqTailAllocated = WireInit(allocated(deqPtr))
809  val s_idle :: s_req :: s_resp :: s_wait :: Nil = Enum(4)
810  val uncacheState = RegInit(s_idle)
811  switch(uncacheState) {
812    is(s_idle) {
813      when(RegNext(io.rob.pendingld && lqTailMmioPending && lqTailAllocated)) {
814        uncacheState := s_req
815      }
816    }
817    is(s_req) {
818      when(io.uncache.req.fire()) {
819        uncacheState := s_resp
820      }
821    }
822    is(s_resp) {
823      when(io.uncache.resp.fire()) {
824        uncacheState := s_wait
825      }
826    }
827    is(s_wait) {
828      when(RegNext(io.rob.commit)) {
829        uncacheState := s_idle // ready for next mmio
830      }
831    }
832  }
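
  // Illustrative note (not part of the original source): the MMIO state machine above walks
  // s_idle -> s_req  (ROB indicates the pending mmio load at the lq tail may issue)
  //        -> s_resp (uncache request accepted, waiting for data)
  //        -> s_wait (data received, waiting for ROB commit)
  //        -> s_idle (ready for the next mmio load).
  // Only one uncached load is in flight at a time, which is why the logic below indexes with deqPtr.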
833  io.uncache.req.valid := uncacheState === s_req
834
835  dataModule.io.uncache.raddr := deqPtrExtNext.value
836
837  io.uncache.req.bits.cmd  := MemoryOpConstants.M_XRD
838  io.uncache.req.bits.addr := dataModule.io.uncache.rdata.paddr
839  io.uncache.req.bits.data := dataModule.io.uncache.rdata.data
840  io.uncache.req.bits.mask := dataModule.io.uncache.rdata.mask
841
842  io.uncache.req.bits.id   := DontCare
843  io.uncache.req.bits.instrtype := DontCare
844
845  io.uncache.resp.ready := true.B
846
847  when (io.uncache.req.fire()) {
848    pending(deqPtr) := false.B
849
850    XSDebug("uncache req: pc %x addr %x data %x op %x mask %x\n",
851      uop(deqPtr).cf.pc,
852      io.uncache.req.bits.addr,
853      io.uncache.req.bits.data,
854      io.uncache.req.bits.cmd,
855      io.uncache.req.bits.mask
856    )
857  }
858
859  // (3) response from uncache channel: mark as datavalid
860  dataModule.io.uncache.wen := false.B
861  when(io.uncache.resp.fire()){
862    datavalid(deqPtr) := true.B
863    dataModule.io.uncacheWrite(deqPtr, io.uncache.resp.bits.data(XLEN-1, 0))
864    dataModule.io.uncache.wen := true.B
865
866    XSDebug("uncache resp: data %x\n", io.refill.bits.data)
867  }
868
869  // Read vaddr for mem exception
870  // no inst will be committed 1 cycle before tval update
871  vaddrModule.io.raddr(0) := (deqPtrExt + commitCount).value
872  io.exceptionAddr.vaddr := vaddrModule.io.rdata(0)
873
874  // Read vaddr for debug
875  (0 until LoadPipelineWidth).map(i => {
876    vaddrModule.io.raddr(i+1) := loadWbSel(i)
877  })
878
879  (0 until LoadPipelineWidth).map(i => {
880    vaddrTriggerResultModule.io.raddr(i) := loadWbSelGen(i)
881    io.trigger(i).lqLoadAddrTriggerHitVec := Mux(
882      loadWbSelV(i),
883      vaddrTriggerResultModule.io.rdata(i),
884      VecInit(Seq.fill(3)(false.B))
885    )
886  })
887
888  // misprediction recovery / exception redirect
889  // invalidate lq entries using robIdx
890  val needCancel = Wire(Vec(LoadQueueSize, Bool()))
891  for (i <- 0 until LoadQueueSize) {
892    needCancel(i) := uop(i).robIdx.needFlush(io.brqRedirect) && allocated(i)
893    when (needCancel(i)) {
894      allocated(i) := false.B
895    }
896  }
897
898  /**
899    * update pointers
900    */
901  val lastEnqCancel = PopCount(RegNext(VecInit(canEnqueue.zip(enqCancel).map(x => x._1 && x._2))))
902  val lastCycleCancelCount = PopCount(RegNext(needCancel))
903  val enqNumber = Mux(io.enq.canAccept && io.enq.sqCanAccept, PopCount(io.enq.req.map(_.valid)), 0.U)
904  when (lastCycleRedirect.valid) {
905    // we recover the pointers in the next cycle after redirect
906    enqPtrExt := VecInit(enqPtrExt.map(_ - (lastCycleCancelCount + lastEnqCancel)))
907  }.otherwise {
908    enqPtrExt := VecInit(enqPtrExt.map(_ + enqNumber))
909  }
910
911  deqPtrExtNext := deqPtrExt + commitCount
912  deqPtrExt := deqPtrExtNext
913
914  io.lqCancelCnt := RegNext(lastCycleCancelCount + lastEnqCancel)
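
  // Illustrative note (not part of the original source): after a redirect, enqPtrExt is rolled
  // back by the number of squashed entries already in the queue (lastCycleCancelCount) plus the
  // same-cycle enqueues that were cancelled in flight (lastEnqCancel); the same total is reported
  // to dispatch one cycle later through io.lqCancelCnt so that the dispatch-side count of free lq
  // entries stays consistent.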
915
916  /**
917    * misc
918    */
919  // perf counter
920  QueuePerf(LoadQueueSize, validCount, !allowEnqueue)
921  io.lqFull := !allowEnqueue
922  XSPerfAccumulate("rollback", io.rollback.valid) // rollback redirect generated
923  XSPerfAccumulate("mmioCycle", uncacheState =/= s_idle) // lq is busy dealing with uncache req
924  XSPerfAccumulate("mmioCnt", io.uncache.req.fire())
925  XSPerfAccumulate("refill", io.refill.valid)
926  XSPerfAccumulate("writeback_success", PopCount(VecInit(io.ldout.map(i => i.fire()))))
927  XSPerfAccumulate("writeback_blocked", PopCount(VecInit(io.ldout.map(i => i.valid && !i.ready))))
928  XSPerfAccumulate("utilization_miss", PopCount((0 until LoadQueueSize).map(i => allocated(i) && miss(i))))
929
930  if (env.EnableTopDown) {
931    val stall_loads_bound = WireDefault(0.B)
932    ExcitingUtils.addSink(stall_loads_bound, "stall_loads_bound", ExcitingUtils.Perf)
933    val have_miss_entry = (allocated zip miss).map(x => x._1 && x._2).reduce(_ || _)
934    val l1d_loads_bound = stall_loads_bound && !have_miss_entry
935    ExcitingUtils.addSource(l1d_loads_bound, "l1d_loads_bound", ExcitingUtils.Perf)
936    XSPerfAccumulate("l1d_loads_bound", l1d_loads_bound)
937    val stall_l1d_load_miss = stall_loads_bound && have_miss_entry
938    ExcitingUtils.addSource(stall_l1d_load_miss, "stall_l1d_load_miss", ExcitingUtils.Perf)
939    ExcitingUtils.addSink(WireInit(0.U), "stall_l1d_load_miss", ExcitingUtils.Perf)
940  }
941
942  val perfValidCount = RegNext(validCount)
943
944  val perfEvents = Seq(
945    ("rollback         ", io.rollback.valid),
946    ("mmioCycle        ", uncacheState =/= s_idle),
947    ("mmio_Cnt         ", io.uncache.req.fire()),
948    ("refill           ", io.refill.valid),
949    ("writeback_success", PopCount(VecInit(io.ldout.map(i => i.fire())))),
950    ("writeback_blocked", PopCount(VecInit(io.ldout.map(i => i.valid && !i.ready)))),
951    ("ltq_1_4_valid    ", (perfValidCount < (LoadQueueSize.U/4.U))),
952    ("ltq_2_4_valid    ", (perfValidCount > (LoadQueueSize.U/4.U)) & (perfValidCount <= (LoadQueueSize.U/2.U))),
953    ("ltq_3_4_valid    ", (perfValidCount > (LoadQueueSize.U/2.U)) & (perfValidCount <= (LoadQueueSize.U*3.U/4.U))),
954    ("ltq_4_4_valid    ", (perfValidCount > (LoadQueueSize.U*3.U/4.U)))
955  )
956  generatePerfEvent()
957
958  // debug info
959  XSDebug("enqPtrExt %d:%d deqPtrExt %d:%d\n", enqPtrExt(0).flag, enqPtr, deqPtrExt.flag, deqPtr)
960
961  def PrintFlag(flag: Bool, name: String): Unit = {
962    when(flag) {
963      XSDebug(false, true.B, name)
964    }.otherwise {
965      XSDebug(false, true.B, " ")
966    }
967  }
968
969  for (i <- 0 until LoadQueueSize) {
970    XSDebug(i + " pc %x pa %x ", uop(i).cf.pc, debug_paddr(i))
971    PrintFlag(allocated(i), "a")
972    PrintFlag(allocated(i) && datavalid(i), "v")
973    PrintFlag(allocated(i) && writebacked(i), "w")
974    PrintFlag(allocated(i) && miss(i), "m")
975    PrintFlag(allocated(i) && pending(i), "p")
976    XSDebug(false, true.B, "\n")
977  }
978
979}
980