// xref: /XiangShan/src/main/scala/xiangshan/mem/lsqueue/LoadQueue.scala (revision 54034ccddc2fa3b1a7b1887d4297f0df0de3bf31)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import xiangshan._
import xiangshan.backend.fu.fpu.FPU
import xiangshan.backend.rob.RobLsqIO
import xiangshan.cache._
import xiangshan.frontend.FtqPtr
import xiangshan.ExceptionNO._

class LqPtr(implicit p: Parameters) extends CircularQueuePtr[LqPtr](
  p => p(XSCoreParamsKey).LoadQueueSize
){
}

object LqPtr {
  def apply(f: Bool, v: UInt)(implicit p: Parameters): LqPtr = {
    val ptr = Wire(new LqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}
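
// Usage sketch (illustration only, not from the original source): LqPtr pairs a
// queue index (value) with a wrap flag, so comparisons stay meaningful after the
// circular queue wraps. Assuming LoadQueueSize = 64 for illustration:
//   val a = LqPtr(false.B, 63.U)  // last slot, lap 0
//   val b = a + 1.U               // value wraps to 0, flag toggles to true
// isAfter(b, a) then holds even though b.value < a.value, because the flag
// (toggled on every wrap) distinguishes laps.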

trait HasLoadHelper { this: XSModule =>
  def rdataHelper(uop: MicroOp, rdata: UInt): UInt = {
    val fpWen = uop.ctrl.fpWen
    LookupTree(uop.ctrl.fuOpType, List(
      LSUOpType.lb   -> SignExt(rdata(7, 0) , XLEN),
      LSUOpType.lh   -> SignExt(rdata(15, 0), XLEN),
      /*
          riscv-spec-20191213: 12.2 NaN Boxing of Narrower Values
          Any operation that writes a narrower result to an f register must write
          all 1s to the uppermost FLEN−n bits to yield a legal NaN-boxed value.
      */
      LSUOpType.lw   -> Mux(fpWen, FPU.box(rdata, FPU.S), SignExt(rdata(31, 0), XLEN)),
      LSUOpType.ld   -> Mux(fpWen, FPU.box(rdata, FPU.D), SignExt(rdata(63, 0), XLEN)),
      LSUOpType.lbu  -> ZeroExt(rdata(7, 0) , XLEN),
      LSUOpType.lhu  -> ZeroExt(rdata(15, 0), XLEN),
      LSUOpType.lwu  -> ZeroExt(rdata(31, 0), XLEN)
    ))
  }
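
  // Concrete NaN-boxing case (illustration, derived from the spec text above):
  // an flw returning the 32-bit pattern 0x3f800000 (1.0f) must be written to the
  // 64-bit f register as 0xffffffff3f800000; FPU.box(rdata, FPU.S) fills the
  // upper 32 bits with all 1s. An integer lw of the same data instead
  // sign-extends, giving 0x000000003f800000.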
}

class LqEnqIO(implicit p: Parameters) extends XSBundle {
  val canAccept = Output(Bool())
  val sqCanAccept = Input(Bool())
  val needAlloc = Vec(exuParameters.LsExuCnt, Input(Bool()))
  val req = Vec(exuParameters.LsExuCnt, Flipped(ValidIO(new MicroOp)))
  val resp = Vec(exuParameters.LsExuCnt, Output(new LqPtr))
}

class LqTriggerIO(implicit p: Parameters) extends XSBundle {
  val hitLoadAddrTriggerHitVec = Input(Vec(3, Bool()))
  val lqLoadAddrTriggerHitVec = Output(Vec(3, Bool()))
}

// Load Queue
class LoadQueue(implicit p: Parameters) extends XSModule
  with HasDCacheParameters
  with HasCircularQueuePtrHelper
  with HasLoadHelper
  with HasPerfEvents
{
  val io = IO(new Bundle() {
    val enq = new LqEnqIO
    val brqRedirect = Flipped(ValidIO(new Redirect))
    val loadIn = Vec(LoadPipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val loadDataForwarded = Vec(LoadPipelineWidth, Input(Bool()))
    val delayedLoadError = Vec(LoadPipelineWidth, Input(Bool()))
    val dcacheRequireReplay = Vec(LoadPipelineWidth, Input(Bool()))
    val ldout = Vec(LoadPipelineWidth, DecoupledIO(new ExuOutput)) // writeback int load
    val load_s1 = Vec(LoadPipelineWidth, Flipped(new PipeLoadForwardQueryIO)) // TODO: to be renamed
    val loadViolationQuery = Vec(LoadPipelineWidth, Flipped(new LoadViolationQueryIO))
    val rob = Flipped(new RobLsqIO)
    val rollback = Output(Valid(new Redirect)) // replay now starts from load instead of store
    val refill = Flipped(ValidIO(new Refill))
    val release = Flipped(ValidIO(new Release))
    val uncache = new UncacheWordIO
    val exceptionAddr = new ExceptionAddrIO
    val lqFull = Output(Bool())
    val lqCancelCnt = Output(UInt(log2Up(LoadQueueSize + 1).W))
    val trigger = Vec(LoadPipelineWidth, new LqTriggerIO)
  })

  println("LoadQueue: size:" + LoadQueueSize)

  val uop = Reg(Vec(LoadQueueSize, new MicroOp))
  // val data = Reg(Vec(LoadQueueSize, new LsRobEntry))
  val dataModule = Module(new LoadQueueDataWrapper(LoadQueueSize, wbNumRead = LoadPipelineWidth, wbNumWrite = LoadPipelineWidth))
  dataModule.io := DontCare
  val vaddrModule = Module(new SyncDataModuleTemplate(UInt(VAddrBits.W), LoadQueueSize, numRead = LoadPipelineWidth + 1, numWrite = LoadPipelineWidth))
  vaddrModule.io := DontCare
  val vaddrTriggerResultModule = Module(new SyncDataModuleTemplate(Vec(3, Bool()), LoadQueueSize, numRead = LoadPipelineWidth, numWrite = LoadPipelineWidth))
  vaddrTriggerResultModule.io := DontCare
  val allocated = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // lq entry has been allocated
  val datavalid = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // data is valid
  val writebacked = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // inst has been writebacked to CDB
  val released = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // load data has been released by dcache
  val error = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // load data has been corrupted
  val miss = Reg(Vec(LoadQueueSize, Bool())) // load inst missed, waiting for miss queue to accept miss request
  // val listening = Reg(Vec(LoadQueueSize, Bool())) // waiting for refill result
  val pending = Reg(Vec(LoadQueueSize, Bool())) // mmio pending: inst is an mmio inst, it will not be executed until it reaches ROB's head
  val refilling = WireInit(VecInit(List.fill(LoadQueueSize)(false.B))) // entry is being refilled by dcache refill data this cycle

  val debug_mmio = Reg(Vec(LoadQueueSize, Bool())) // debug: inst is an mmio inst
  val debug_paddr = Reg(Vec(LoadQueueSize, UInt(PAddrBits.W))) // debug: paddr of the inst

  val enqPtrExt = RegInit(VecInit((0 until io.enq.req.length).map(_.U.asTypeOf(new LqPtr))))
  val deqPtrExt = RegInit(0.U.asTypeOf(new LqPtr))
  val deqPtrExtNext = Wire(new LqPtr)

  val enqPtr = enqPtrExt(0).value
  val deqPtr = deqPtrExt.value

  val validCount = distanceBetween(enqPtrExt(0), deqPtrExt)
  val allowEnqueue = validCount <= (LoadQueueSize - LoadPipelineWidth).U

  val deqMask = UIntToMask(deqPtr, LoadQueueSize)
  val enqMask = UIntToMask(enqPtr, LoadQueueSize)

  val commitCount = RegNext(io.rob.lcommit)

  val release1cycle = io.release
  val release2cycle = RegNext(io.release)
  val release2cycle_dup_lsu = RegNext(io.release)

  /**
    * Enqueue at dispatch
    *
    * Currently, LoadQueue only allows enqueue when #emptyEntries > EnqWidth
    */
  io.enq.canAccept := allowEnqueue

  val canEnqueue = io.enq.req.map(_.valid)
  val enqCancel = io.enq.req.map(_.bits.robIdx.needFlush(io.brqRedirect))
  for (i <- 0 until io.enq.req.length) {
    val offset = if (i == 0) 0.U else PopCount(io.enq.needAlloc.take(i))
    val lqIdx = enqPtrExt(offset)
    val index = io.enq.req(i).bits.lqIdx.value
    when (canEnqueue(i) && !enqCancel(i)) {
      uop(index).robIdx := io.enq.req(i).bits.robIdx
      allocated(index) := true.B
      datavalid(index) := false.B
      writebacked(index) := false.B
      released(index) := false.B
      miss(index) := false.B
      pending(index) := false.B
      error(index) := false.B
      XSError(!io.enq.canAccept || !io.enq.sqCanAccept, s"must accept $i\n")
      XSError(index =/= lqIdx.value, s"must be the same entry $i\n")
    }
    io.enq.resp(i) := lqIdx
  }
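
  // Dispatch-port compaction example (illustration only, not from the original
  // source): offset counts how many lower-numbered ports also allocate this
  // cycle, so allocating ports map onto consecutive enqueue pointers. With
  // needAlloc = (1, 0, 1), port 0 gets enqPtrExt(0) and port 2 gets
  // enqPtrExt(PopCount(needAlloc.take(2))) = enqPtrExt(1); idle port 1 consumes
  // no entry.
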
  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n")

  /**
    * Writeback load from load units
    *
    * Most load instructions write back to the regfile at the same time.
    * However,
    *   (1) For an mmio instruction with exceptions, it writes back to ROB immediately.
    *   (2) For an mmio instruction without exceptions, it does not write back.
    * The mmio instruction will be sent to lower level when it reaches ROB's head.
    * After uncache response, it will write back through arbiter with loadUnit.
    *   (3) For cache misses, it is marked as miss and will be sent to dcache later.
    * After cache refills, it will write back through arbiter with loadUnit.
    */
  for (i <- 0 until LoadPipelineWidth) {
    dataModule.io.wb.wen(i) := false.B
    vaddrTriggerResultModule.io.wen(i) := false.B
    val loadWbIndex = io.loadIn(i).bits.uop.lqIdx.value

    // most lq status fields need to be updated immediately after load writeback to lq
    when(io.loadIn(i).fire()) {
      when(io.loadIn(i).bits.miss) {
        XSInfo(io.loadIn(i).valid, "load miss write to lq idx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio
        )
      }.otherwise {
        XSInfo(io.loadIn(i).valid, "load hit write to cdb lqidx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio
        )
      }
      if(EnableFastForward){
        datavalid(loadWbIndex) := (!io.loadIn(i).bits.miss || io.loadDataForwarded(i)) &&
          !io.loadIn(i).bits.mmio && // mmio data is not valid until uncache access finishes
          !io.dcacheRequireReplay(i) // do not writeback if that inst will be re-sent from rs
      } else {
        datavalid(loadWbIndex) := (!io.loadIn(i).bits.miss || io.loadDataForwarded(i)) &&
          !io.loadIn(i).bits.mmio // mmio data is not valid until uncache access finishes
      }
      writebacked(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio

      val loadWbData = Wire(new LQDataEntry)
      loadWbData.paddr := io.loadIn(i).bits.paddr
      loadWbData.mask := io.loadIn(i).bits.mask
      loadWbData.data := io.loadIn(i).bits.forwardData.asUInt // fwd data
      loadWbData.fwdMask := io.loadIn(i).bits.forwardMask
      dataModule.io.wbWrite(i, loadWbIndex, loadWbData)
      dataModule.io.wb.wen(i) := true.B

      vaddrTriggerResultModule.io.waddr(i) := loadWbIndex
      vaddrTriggerResultModule.io.wdata(i) := io.trigger(i).hitLoadAddrTriggerHitVec
      vaddrTriggerResultModule.io.wen(i) := true.B

      debug_mmio(loadWbIndex) := io.loadIn(i).bits.mmio
      debug_paddr(loadWbIndex) := io.loadIn(i).bits.paddr

      val dcacheMissed = io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      if(EnableFastForward){
        miss(loadWbIndex) := dcacheMissed && !io.loadDataForwarded(i) && !io.dcacheRequireReplay(i)
      } else {
        miss(loadWbIndex) := dcacheMissed && !io.loadDataForwarded(i)
      }
      pending(loadWbIndex) := io.loadIn(i).bits.mmio
      released(loadWbIndex) := release2cycle.valid &&
        io.loadIn(i).bits.paddr(PAddrBits-1, DCacheLineOffset) === release2cycle.bits.paddr(PAddrBits-1, DCacheLineOffset) ||
        release1cycle.valid &&
        io.loadIn(i).bits.paddr(PAddrBits-1, DCacheLineOffset) === release1cycle.bits.paddr(PAddrBits-1, DCacheLineOffset)
      // dirty code for load instr
      uop(loadWbIndex).pdest := io.loadIn(i).bits.uop.pdest
      uop(loadWbIndex).cf := io.loadIn(i).bits.uop.cf
      uop(loadWbIndex).ctrl := io.loadIn(i).bits.uop.ctrl
      uop(loadWbIndex).debugInfo := io.loadIn(i).bits.uop.debugInfo
    }

    // vaddrModule write is delayed, as vaddrModule will not be read right after write
    vaddrModule.io.waddr(i) := RegNext(loadWbIndex)
    vaddrModule.io.wdata(i) := RegNext(io.loadIn(i).bits.vaddr)
    vaddrModule.io.wen(i) := RegNext(io.loadIn(i).fire())
  }

  when(io.refill.valid) {
    XSDebug("miss resp: paddr:0x%x data %x\n", io.refill.bits.addr, io.refill.bits.data)
  }

  // Refill 64 bit in a cycle
  // Refill data comes back from io.dcache.resp
  dataModule.io.refill.valid := io.refill.valid
  dataModule.io.refill.paddr := io.refill.bits.addr
  dataModule.io.refill.data := io.refill.bits.data

  val dcacheRequireReplay = WireInit(VecInit((0 until LoadPipelineWidth).map(i =>{
    RegNext(io.loadIn(i).fire()) && RegNext(io.dcacheRequireReplay(i))
  })))
  dontTouch(dcacheRequireReplay)

  (0 until LoadQueueSize).map(i => {
    dataModule.io.refill.refillMask(i) := allocated(i) && miss(i)
    when(dataModule.io.refill.valid && dataModule.io.refill.refillMask(i) && dataModule.io.refill.matchMask(i)) {
      datavalid(i) := true.B
      miss(i) := false.B
      when(!dcacheRequireReplay.asUInt.orR){
        refilling(i) := true.B
      }
      when(io.refill.bits.error) {
        error(i) := true.B
      }
    }
  })

  for (i <- 0 until LoadPipelineWidth) {
    val loadWbIndex = io.loadIn(i).bits.uop.lqIdx.value
    val lastCycleLoadWbIndex = RegNext(loadWbIndex)
    // update miss state in load s3
    if(!EnableFastForward){
      // dcacheRequireReplay will be used to update lq flag 1 cycle after for better timing
      //
      // io.dcacheRequireReplay comes from dcache miss req reject, which is quite slow to generate
      when(dcacheRequireReplay(i) && !refill_addr_hit(RegNext(io.loadIn(i).bits.paddr), io.refill.bits.addr)) {
        // do not writeback if that inst will be re-sent from rs
        // rob writeback will not be triggered by a refill before inst replay
        miss(lastCycleLoadWbIndex) := false.B // disable refill listening
        datavalid(lastCycleLoadWbIndex) := false.B // disable refill listening
        assert(!datavalid(lastCycleLoadWbIndex))
      }
    }
    // update load error state in load s3
    when(RegNext(io.loadIn(i).fire()) && io.delayedLoadError(i)){
      uop(lastCycleLoadWbIndex).cf.exceptionVec(loadAccessFault) := true.B
    }
  }


  // Writeback up to 2 missed load insts to CDB
  //
  // Pick 2 missed loads (data refilled) and write them back to the cdb
  // The 2 refilled loads are selected from even/odd entries separately

  // Stage 0
  // Generate writeback indexes

  def getRemBits(input: UInt)(rem: Int): UInt = {
    VecInit((0 until LoadQueueSize / LoadPipelineWidth).map(i => { input(LoadPipelineWidth * i + rem) })).asUInt
  }
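
  // Banking example (illustration only, not from the original source):
  // getRemBits splits a LoadQueueSize-wide bitvector into LoadPipelineWidth
  // interleaved banks. With LoadPipelineWidth = 2, getRemBits(v)(0) gathers
  // v(0), v(2), v(4), ... and getRemBits(v)(1) gathers v(1), v(3), v(5), ...,
  // so each writeback port only ever selects among its own even/odd entries and
  // the two selectors can never pick the same index.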

  val loadWbSel = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W))) // index selected last cycle
  val loadWbSelV = Wire(Vec(LoadPipelineWidth, Bool())) // index selected in last cycle is valid

  val loadWbSelVec = VecInit((0 until LoadQueueSize).map(i => {
    // allocated(i) && !writebacked(i) && (datavalid(i) || refilling(i))
    allocated(i) && !writebacked(i) && datavalid(i) // query refilling will cause bad timing
  })).asUInt() // use UInt instead of Vec to reduce verilog lines
  val remDeqMask = Seq.tabulate(LoadPipelineWidth)(getRemBits(deqMask)(_))
  // generate lastCycleSelect mask
  val remFireMask = Seq.tabulate(LoadPipelineWidth)(rem => getRemBits(UIntToOH(loadWbSel(rem)))(rem))
  // generate real select vec
  def toVec(a: UInt): Vec[Bool] = {
    VecInit(a.asBools)
  }
  val loadRemSelVecFire = Seq.tabulate(LoadPipelineWidth)(rem => getRemBits(loadWbSelVec)(rem) & ~remFireMask(rem))
  val loadRemSelVecNotFire = Seq.tabulate(LoadPipelineWidth)(getRemBits(loadWbSelVec)(_))
  val loadRemSel = Seq.tabulate(LoadPipelineWidth)(rem => Mux(
    io.ldout(rem).fire(),
    getFirstOne(toVec(loadRemSelVecFire(rem)), remDeqMask(rem)),
    getFirstOne(toVec(loadRemSelVecNotFire(rem)), remDeqMask(rem))
  ))


  val loadWbSelGen = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W)))
  val loadWbSelVGen = Wire(Vec(LoadPipelineWidth, Bool()))
  (0 until LoadPipelineWidth).foreach(index => {
    loadWbSelGen(index) := (
      if (LoadPipelineWidth > 1) Cat(loadRemSel(index), index.U(log2Ceil(LoadPipelineWidth).W))
      else loadRemSel(index)
    )
    loadWbSelVGen(index) := Mux(io.ldout(index).fire, loadRemSelVecFire(index).asUInt.orR, loadRemSelVecNotFire(index).asUInt.orR)
  })

  (0 until LoadPipelineWidth).map(i => {
    loadWbSel(i) := RegNext(loadWbSelGen(i))
    loadWbSelV(i) := RegNext(loadWbSelVGen(i), init = false.B)
    when(io.ldout(i).fire()){
      // Mark them as writebacked, so they will not be selected in the next cycle
      writebacked(loadWbSel(i)) := true.B
    }
  })

  // Stage 1
  // Use indexes generated in cycle 0 to read data
  // writeback data to cdb
  (0 until LoadPipelineWidth).map(i => {
    // data select
    dataModule.io.wb.raddr(i) := loadWbSelGen(i)
    val rdata = dataModule.io.wb.rdata(i).data
    val seluop = uop(loadWbSel(i))
    val func = seluop.ctrl.fuOpType
    val raddr = dataModule.io.wb.rdata(i).paddr
    val rdataSel = LookupTree(raddr(2, 0), List(
      "b000".U -> rdata(63, 0),
      "b001".U -> rdata(63, 8),
      "b010".U -> rdata(63, 16),
      "b011".U -> rdata(63, 24),
      "b100".U -> rdata(63, 32),
      "b101".U -> rdata(63, 40),
      "b110".U -> rdata(63, 48),
      "b111".U -> rdata(63, 56)
    ))
    val rdataPartialLoad = rdataHelper(seluop, rdataSel)
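
    // Byte-alignment example (illustration only, not from the original source):
    // rdata is the 64-bit entry data and raddr(2,0) the byte offset inside it.
    // For an lh at byte offset 2, raddr(2,0) = "b010" selects rdata(63, 16), and
    // rdataHelper then sign-extends the low 16 bits of that shifted value (the
    // halfword at bytes 3:2) to XLEN.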

    // writeback missed int/fp load
    //
    // Int load writeback will finish (if not blocked) in one cycle
    io.ldout(i).bits.uop := seluop
    io.ldout(i).bits.uop.lqIdx := loadWbSel(i).asTypeOf(new LqPtr)
    io.ldout(i).bits.data := rdataPartialLoad
    io.ldout(i).bits.redirectValid := false.B
    io.ldout(i).bits.redirect := DontCare
    io.ldout(i).bits.debug.isMMIO := debug_mmio(loadWbSel(i))
    io.ldout(i).bits.debug.isPerfCnt := false.B
    io.ldout(i).bits.debug.paddr := debug_paddr(loadWbSel(i))
    io.ldout(i).bits.debug.vaddr := vaddrModule.io.rdata(i+1)
    io.ldout(i).bits.fflags := DontCare
    io.ldout(i).valid := loadWbSelV(i)

    when(io.ldout(i).fire()) {
      XSInfo("int load miss write to cdb robidx %d lqidx %d pc 0x%x mmio %x\n",
        io.ldout(i).bits.uop.robIdx.asUInt,
        io.ldout(i).bits.uop.lqIdx.asUInt,
        io.ldout(i).bits.uop.cf.pc,
        debug_mmio(loadWbSel(i))
      )
    }

  })

  /**
    * Load commits
    *
    * When a load is committed, mark it as !allocated and move deqPtrExt forward.
    */
  (0 until CommitWidth).map(i => {
    when(commitCount > i.U){
      allocated((deqPtrExt+i.U).value) := false.B
      XSError(!allocated((deqPtrExt+i.U).value), s"why commit invalid entry $i?\n")
    }
  })

  def getFirstOne(mask: Vec[Bool], startMask: UInt) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    PriorityEncoder(Mux(highBitsUint.orR(), highBitsUint, mask.asUInt))
  }
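
  // Selection example (illustration only, not from the original source): with
  // mask = b0101 and startMask = UIntToMask(2, 4) = b0011, highBits masks off
  // bits below the start, giving b0100, so entry 2 is picked; with mask = b0001
  // highBits is empty and the search wraps around to entry 0. Using deqMask as
  // startMask therefore yields oldest-first selection on the circular queue.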

  def getOldest[T <: XSBundleWithMicroOp](valid: Seq[Bool], bits: Seq[T]): (Seq[Bool], Seq[T]) = {
    assert(valid.length == bits.length)
    assert(isPow2(valid.length))
    if (valid.length == 1) {
      (valid, bits)
    } else if (valid.length == 2) {
      val res = Seq.fill(2)(Wire(ValidIO(chiselTypeOf(bits(0)))))
      for (i <- res.indices) {
        res(i).valid := valid(i)
        res(i).bits := bits(i)
      }
      val oldest = Mux(valid(0) && valid(1), Mux(isAfter(bits(0).uop.robIdx, bits(1).uop.robIdx), res(1), res(0)), Mux(valid(0) && !valid(1), res(0), res(1)))
      (Seq(oldest.valid), Seq(oldest.bits))
    } else {
      val left = getOldest(valid.take(valid.length / 2), bits.take(valid.length / 2))
      val right = getOldest(valid.takeRight(valid.length / 2), bits.takeRight(valid.length / 2))
      getOldest(left._1 ++ right._1, left._2 ++ right._2)
    }
  }
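
  // Reduction example (illustration only, not from the original source):
  // getOldest is a binary tournament on robIdx. For 4 inputs it reduces (0,1)
  // and (2,3) in parallel and then the two winners, i.e. log2(n) Mux levels
  // instead of a linear compare chain. An invalid entry always loses: with
  // valid = (false, true) the final Mux returns res(1), so the result stays
  // well-defined when only one side is valid.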

  def getAfterMask(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    val length = valid.length
    (0 until length).map(i => {
      (0 until length).map(j => {
        Mux(valid(i) && valid(j),
          isAfter(uop(i).robIdx, uop(j).robIdx),
          Mux(!valid(i), true.B, false.B))
      })
    })
  }
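
  // Matrix example (illustration only, not from the original source):
  // getAfterMask builds a pairwise "i is younger than j" matrix so that an
  // oldest entry can later be chosen with flat AND/OR terms rather than a
  // serial compare chain. Row i, column j is true when both are valid and
  // uop(i) is after uop(j) in program order; an invalid i reports all-true,
  // so it can never be picked as the oldest.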

  /**
    * Store-Load Memory violation detection
    *
    * When store writes back, it searches LoadQueue for younger load instructions
    * with the same load physical address. They loaded wrong data and need re-execution.
    *
    * Cycle 0: Store Writeback
    *   Generate match vector for store address with rangeMask(stPtr, enqPtr).
    *   Besides, load instructions in LoadUnit_S1 and S2 are also checked.
    * Cycle 1: Redirect Generation
    *   There are three possible types of violations, up to 6 possible redirect requests.
    *   Choose the oldest load (part 1). (4 + 2) -> (1 + 2)
    * Cycle 2: Redirect Fire
    *   Choose the oldest load (part 2). (3 -> 1)
    *   Prepare redirect request according to the detected violation.
    *   Fire redirect request (if valid)
    */

  // stage 0:        lq l1 wb     l1 wb lq
  //                 |  |  |      |  |  |  (paddr match)
  // stage 1:        lq l1 wb     l1 wb lq
  //                 |  |  |      |  |  |
  //                 |  |------------|  |
  //                 |        |         |
  // stage 2:        lq      l1wb       lq
  //                 |        |         |
  //                 --------------------
  //                          |
  //                      rollback req
  io.load_s1 := DontCare
  def detectRollback(i: Int) = {
    val startIndex = io.storeIn(i).bits.uop.lqIdx.value
    val lqIdxMask = UIntToMask(startIndex, LoadQueueSize)
    val xorMask = lqIdxMask ^ enqMask
    val sameFlag = io.storeIn(i).bits.uop.lqIdx.flag === enqPtrExt(0).flag
    val stToEnqPtrMask = Mux(sameFlag, xorMask, ~xorMask)
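
    // Range-mask example (illustration only, not from the original source):
    // with LoadQueueSize = 8, a store at lqIdx 2 and enqPtr = 6 on the same
    // flag, lqIdxMask = b00000011 and enqMask = b00111111, so
    // xorMask = b00111100 selects entries 2..5: exactly the allocated loads
    // younger than the store. When the two pointers sit on different wrap laps
    // (sameFlag is false), the live range wraps around the queue, hence the
    // ~xorMask complement.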

    // check if a load already in lq needs to be rolled back
    dataModule.io.violation(i).paddr := io.storeIn(i).bits.paddr
    dataModule.io.violation(i).mask := io.storeIn(i).bits.mask
    val addrMaskMatch = RegNext(dataModule.io.violation(i).violationMask)
    val entryNeedCheck = RegNext(VecInit((0 until LoadQueueSize).map(j => {
      allocated(j) && stToEnqPtrMask(j) && (datavalid(j) || miss(j))
    })))
    val lqViolationVec = VecInit((0 until LoadQueueSize).map(j => {
      addrMaskMatch(j) && entryNeedCheck(j)
    }))
    val lqViolation = lqViolationVec.asUInt().orR()
    val lqViolationIndex = getFirstOne(lqViolationVec, RegNext(lqIdxMask))
    val lqViolationUop = uop(lqViolationIndex)
    // lqViolationUop.lqIdx.flag := deqMask(lqViolationIndex) ^ deqPtrExt.flag
    // lqViolationUop.lqIdx.value := lqViolationIndex
    XSDebug(lqViolation, p"${Binary(Cat(lqViolationVec))}, $startIndex, $lqViolationIndex\n")

    // when l/s writeback to rob together, check if rollback is needed
    val wbViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.loadIn(j).valid &&
      isAfter(io.loadIn(j).bits.uop.robIdx, io.storeIn(i).bits.uop.robIdx) &&
      io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.loadIn(j).bits.paddr(PAddrBits - 1, 3) &&
      (io.storeIn(i).bits.mask & io.loadIn(j).bits.mask).orR
    })))
    val wbViolation = wbViolationVec.asUInt().orR() && RegNext(io.storeIn(i).valid && !io.storeIn(i).bits.miss)
    val wbViolationUop = getOldest(wbViolationVec, RegNext(VecInit(io.loadIn.map(_.bits))))._2(0).uop
    XSDebug(wbViolation, p"${Binary(Cat(wbViolationVec))}, $wbViolationUop\n")

    // check if rollback is needed for load in l1
    val l1ViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.load_s1(j).valid && // L1 valid
      isAfter(io.load_s1(j).uop.robIdx, io.storeIn(i).bits.uop.robIdx) &&
      io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.load_s1(j).paddr(PAddrBits - 1, 3) &&
      (io.storeIn(i).bits.mask & io.load_s1(j).mask).orR
    })))
    val l1Violation = l1ViolationVec.asUInt().orR() && RegNext(io.storeIn(i).valid && !io.storeIn(i).bits.miss)
    val load_s1 = Wire(Vec(LoadPipelineWidth, new XSBundleWithMicroOp))
    (0 until LoadPipelineWidth).foreach(j => load_s1(j).uop := io.load_s1(j).uop)
    val l1ViolationUop = getOldest(l1ViolationVec, RegNext(load_s1))._2(0).uop
    XSDebug(l1Violation, p"${Binary(Cat(l1ViolationVec))}, $l1ViolationUop\n")

    XSDebug(
      l1Violation,
      "need rollback (l1 load) pc %x robidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.robIdx.asUInt, l1ViolationUop.robIdx.asUInt
    )
    XSDebug(
      lqViolation,
      "need rollback (ld wb before store) pc %x robidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.robIdx.asUInt, lqViolationUop.robIdx.asUInt
    )
    XSDebug(
      wbViolation,
      "need rollback (ld/st wb together) pc %x robidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.robIdx.asUInt, wbViolationUop.robIdx.asUInt
    )

    ((lqViolation, lqViolationUop), (wbViolation, wbViolationUop), (l1Violation, l1ViolationUop))
  }

  def rollbackSel(a: Valid[MicroOpRbExt], b: Valid[MicroOpRbExt]): ValidIO[MicroOpRbExt] = {
    Mux(
      a.valid,
      Mux(
        b.valid,
        Mux(isAfter(a.bits.uop.robIdx, b.bits.uop.robIdx), b, a), // a,b both valid, sel oldest
        a // sel a
      ),
      b // sel b
    )
  }
  val lastCycleRedirect = RegNext(io.brqRedirect)
  val lastlastCycleRedirect = RegNext(lastCycleRedirect)

  // S2: select rollback (part1) and generate rollback request
  // rollback check
  // Wb/L1 rollback seq check is done in s2
  val rollbackWb = Wire(Vec(StorePipelineWidth, Valid(new MicroOpRbExt)))
  val rollbackL1 = Wire(Vec(StorePipelineWidth, Valid(new MicroOpRbExt)))
  val rollbackL1Wb = Wire(Vec(StorePipelineWidth*2, Valid(new MicroOpRbExt)))
  // Lq rollback seq check is done in s3 (next stage), as getting rollbackLq MicroOp is slow
  val rollbackLq = Wire(Vec(StorePipelineWidth, Valid(new MicroOpRbExt)))
  // store ftq index for store set update
  val stFtqIdxS2 = Wire(Vec(StorePipelineWidth, new FtqPtr))
  val stFtqOffsetS2 = Wire(Vec(StorePipelineWidth, UInt(log2Up(PredictWidth).W)))
  for (i <- 0 until StorePipelineWidth) {
    val detectedRollback = detectRollback(i)
    rollbackLq(i).valid := detectedRollback._1._1 && RegNext(io.storeIn(i).valid)
    rollbackLq(i).bits.uop := detectedRollback._1._2
    rollbackLq(i).bits.flag := i.U
    rollbackWb(i).valid := detectedRollback._2._1 && RegNext(io.storeIn(i).valid)
    rollbackWb(i).bits.uop := detectedRollback._2._2
    rollbackWb(i).bits.flag := i.U
    rollbackL1(i).valid := detectedRollback._3._1 && RegNext(io.storeIn(i).valid)
    rollbackL1(i).bits.uop := detectedRollback._3._2
    rollbackL1(i).bits.flag := i.U
    rollbackL1Wb(2*i) := rollbackL1(i)
    rollbackL1Wb(2*i+1) := rollbackWb(i)
    stFtqIdxS2(i) := RegNext(io.storeIn(i).bits.uop.cf.ftqPtr)
    stFtqOffsetS2(i) := RegNext(io.storeIn(i).bits.uop.cf.ftqOffset)
  }

  val rollbackL1WbSelected = ParallelOperation(rollbackL1Wb, rollbackSel)
  val rollbackL1WbVReg = RegNext(rollbackL1WbSelected.valid)
  val rollbackL1WbReg = RegEnable(rollbackL1WbSelected.bits, rollbackL1WbSelected.valid)
  val rollbackLqVReg = rollbackLq.map(x => RegNext(x.valid))
  val rollbackLqReg = rollbackLq.map(x => RegEnable(x.bits, x.valid))

  // S3: select rollback (part2), generate rollback request, then fire rollback request
  // Note that we use robIdx - 1.U to flush the load instruction itself.
  // Thus, here if last cycle's robIdx equals this cycle's robIdx, it still triggers the redirect.

  val rollbackValidVec = rollbackL1WbVReg +: rollbackLqVReg
  val rollbackUopExtVec = rollbackL1WbReg +: rollbackLqReg

  // select uop in parallel
  val mask = getAfterMask(rollbackValidVec, rollbackUopExtVec.map(i => i.uop))
  val lqs = getOldest(rollbackLqVReg, rollbackLqReg)
  val rollbackUopExt = getOldest(lqs._1 :+ rollbackL1WbVReg, lqs._2 :+ rollbackL1WbReg)._2(0)
  val stFtqIdxS3 = RegNext(stFtqIdxS2)
  val stFtqOffsetS3 = RegNext(stFtqOffsetS2)
  val rollbackUop = rollbackUopExt.uop
  val rollbackStFtqIdx = stFtqIdxS3(rollbackUopExt.flag)
  val rollbackStFtqOffset = stFtqOffsetS3(rollbackUopExt.flag)

  // check if rollback request is still valid in parallel
  val rollbackValidVecChecked = Wire(Vec(LoadPipelineWidth + 1, Bool()))
  for(((v, uop), idx) <- rollbackValidVec.zip(rollbackUopExtVec.map(i => i.uop)).zipWithIndex) {
    rollbackValidVecChecked(idx) := v &&
      (!lastCycleRedirect.valid || isBefore(uop.robIdx, lastCycleRedirect.bits.robIdx)) &&
      (!lastlastCycleRedirect.valid || isBefore(uop.robIdx, lastlastCycleRedirect.bits.robIdx))
  }

  io.rollback.bits.robIdx := rollbackUop.robIdx
  io.rollback.bits.ftqIdx := rollbackUop.cf.ftqPtr
  io.rollback.bits.stFtqIdx := rollbackStFtqIdx
  io.rollback.bits.ftqOffset := rollbackUop.cf.ftqOffset
  io.rollback.bits.stFtqOffset := rollbackStFtqOffset
  io.rollback.bits.level := RedirectLevel.flush
  io.rollback.bits.interrupt := DontCare
  io.rollback.bits.cfiUpdate := DontCare
  io.rollback.bits.cfiUpdate.target := rollbackUop.cf.pc
  io.rollback.bits.debug_runahead_checkpoint_id := rollbackUop.debugInfo.runahead_checkpoint_id
  // io.rollback.bits.pc := DontCare

  io.rollback.valid := rollbackValidVecChecked.asUInt.orR

  when(io.rollback.valid) {
    // XSDebug("Mem rollback: pc %x robidx %d\n", io.rollback.bits.cfi, io.rollback.bits.robIdx.asUInt)
  }

  /**
  * Load-Load Memory violation detection
  *
  * When a load arrives at load_s1, it searches LoadQueue for younger load instructions
  * with the same load physical address. If a younger load has been released (or observed),
  * the younger load needs to be re-execed.
  *
  * For now, if re-exec is found to be needed in load_s1, we mark the older load as replayInst,
  * and the two loads will be replayed if the older load becomes the head of rob.
  *
  * When dcache releases a line, mark all writebacked entries in load queue with
  * the same line paddr as released.
  */

  // Load-Load Memory violation query
  val deqRightMask = UIntToMask.rightmask(deqPtr, LoadQueueSize)
  (0 until LoadPipelineWidth).map(i => {
    dataModule.io.release_violation(i).paddr := io.loadViolationQuery(i).req.bits.paddr
    io.loadViolationQuery(i).req.ready := true.B
    io.loadViolationQuery(i).resp.valid := RegNext(io.loadViolationQuery(i).req.fire())
    // Generate real violation mask
    // Note that we use UIntToMask.rightmask here
    val startIndex = io.loadViolationQuery(i).req.bits.uop.lqIdx.value
    val lqIdxMask = UIntToMask(startIndex, LoadQueueSize)
    val xorMask = lqIdxMask ^ enqMask
    val sameFlag = io.loadViolationQuery(i).req.bits.uop.lqIdx.flag === enqPtrExt(0).flag
    val ldToEnqPtrMask = Mux(sameFlag, xorMask, ~xorMask)
    val ldld_violation_mask_gen_1 = WireInit(VecInit((0 until LoadQueueSize).map(j => {
      ldToEnqPtrMask(j) && // the load is younger than current load
      allocated(j) && // entry is valid
      released(j) && // cacheline is released
      (datavalid(j) || miss(j)) // paddr is valid
    })))
    val ldld_violation_mask_gen_2 = WireInit(VecInit((0 until LoadQueueSize).map(j => {
      dataModule.io.release_violation(i).match_mask(j)// addr match
      // addr match result is slow to generate, we RegNext() it
    })))
    val ldld_violation_mask = RegNext(ldld_violation_mask_gen_1).asUInt & RegNext(ldld_violation_mask_gen_2).asUInt
    dontTouch(ldld_violation_mask)
    ldld_violation_mask.suggestName("ldldViolationMask_" + i)
    io.loadViolationQuery(i).resp.bits.have_violation := ldld_violation_mask.orR
  })

  // "released" flag update
  //
  // When io.release.valid (release1cycle.valid), it uses the last ld-ld paddr cam port to
  // update release flag in 1 cycle

  when(release1cycle.valid){
    // Take over ld-ld paddr cam port
    dataModule.io.release_violation.takeRight(1)(0).paddr := release1cycle.bits.paddr
    io.loadViolationQuery.takeRight(1)(0).req.ready := false.B
  }

  when(release2cycle.valid){
    // If a load comes in that cycle, we cannot judge if it has a ld-ld violation
    // We replay that load inst from RS
    io.loadViolationQuery.map(i => i.req.ready :=
      // use lsu side release2cycle_dup_lsu paddr for better timing
      i.req.bits.paddr(PAddrBits-1, DCacheLineOffset) =/= release2cycle_dup_lsu.bits.paddr(PAddrBits-1, DCacheLineOffset)
    )
    // io.loadViolationQuery.map(i => i.req.ready := false.B) // For better timing
  }

  (0 until LoadQueueSize).map(i => {
    when(RegNext(dataModule.io.release_violation.takeRight(1)(0).match_mask(i) &&
      allocated(i) &&
      datavalid(i) &&
      release1cycle.valid
    )){
      // Note: if a load has missed in dcache and is waiting for refill in load queue,
      // its released flag still needs to be set to true if addr matches.
      released(i) := true.B
    }
  })

  /**
    * Memory mapped IO / other uncached operations
    *
    * States:
    * (1) writeback from load units: mark as pending
    * (2) when they reach ROB's head, they can be sent to uncache channel
    * (3) response from uncache channel: mark as datavalid
    * (4) writeback to ROB (and other units): mark as writebacked
    * (5) ROB commits the instruction: same as normal instructions
    */
  //(2) when they reach ROB's head, they can be sent to uncache channel
  val lqTailMmioPending = WireInit(pending(deqPtr))
  val lqTailAllocated = WireInit(allocated(deqPtr))
  val s_idle :: s_req :: s_resp :: s_wait :: Nil = Enum(4)
  val uncacheState = RegInit(s_idle)
  switch(uncacheState) {
    is(s_idle) {
      when(RegNext(io.rob.pendingld && lqTailMmioPending && lqTailAllocated)) {
        uncacheState := s_req
      }
    }
    is(s_req) {
      when(io.uncache.req.fire()) {
        uncacheState := s_resp
      }
    }
    is(s_resp) {
      when(io.uncache.resp.fire()) {
        uncacheState := s_wait
      }
    }
    is(s_wait) {
      when(RegNext(io.rob.commit)) {
        uncacheState := s_idle // ready for next mmio
      }
    }
  }
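
  // Example sequence (illustration only, not from the original source): an mmio
  // load waits in s_idle with pending(deqPtr) set until the ROB reports a
  // pending load at the queue head; the FSM then walks s_req (fire one uncache
  // read), s_resp (wait for the uncache response, which sets datavalid below)
  // and s_wait (hold until the ROB commit signal) before returning to s_idle
  // for the next mmio access.
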
  io.uncache.req.valid := uncacheState === s_req

  dataModule.io.uncache.raddr := deqPtrExtNext.value

  io.uncache.req.bits.cmd  := MemoryOpConstants.M_XRD
  io.uncache.req.bits.addr := dataModule.io.uncache.rdata.paddr
  io.uncache.req.bits.data := dataModule.io.uncache.rdata.data
  io.uncache.req.bits.mask := dataModule.io.uncache.rdata.mask

  io.uncache.req.bits.id   := DontCare
  io.uncache.req.bits.instrtype := DontCare

  io.uncache.resp.ready := true.B

  when (io.uncache.req.fire()) {
    pending(deqPtr) := false.B

    XSDebug("uncache req: pc %x addr %x data %x op %x mask %x\n",
      uop(deqPtr).cf.pc,
      io.uncache.req.bits.addr,
      io.uncache.req.bits.data,
      io.uncache.req.bits.cmd,
      io.uncache.req.bits.mask
    )
  }

  // (3) response from uncache channel: mark as datavalid
  dataModule.io.uncache.wen := false.B
  when(io.uncache.resp.fire()){
    datavalid(deqPtr) := true.B
    dataModule.io.uncacheWrite(deqPtr, io.uncache.resp.bits.data(XLEN-1, 0))
    dataModule.io.uncache.wen := true.B

    XSDebug("uncache resp: data %x\n", io.uncache.resp.bits.data)
  }

  // Read vaddr for mem exception
  // no inst will be committed 1 cycle before tval update
  vaddrModule.io.raddr(0) := (deqPtrExt + commitCount).value
  io.exceptionAddr.vaddr := vaddrModule.io.rdata(0)

  // Read vaddr for debug
  (0 until LoadPipelineWidth).map(i => {
    vaddrModule.io.raddr(i+1) := loadWbSel(i)
  })

  (0 until LoadPipelineWidth).map(i => {
    vaddrTriggerResultModule.io.raddr(i) := loadWbSelGen(i)
    io.trigger(i).lqLoadAddrTriggerHitVec := Mux(
      loadWbSelV(i),
      vaddrTriggerResultModule.io.rdata(i),
      VecInit(Seq.fill(3)(false.B))
    )
  })

  // misprediction recovery / exception redirect
  // invalidate lq entries using robIdx
  val needCancel = Wire(Vec(LoadQueueSize, Bool()))
  for (i <- 0 until LoadQueueSize) {
    needCancel(i) := uop(i).robIdx.needFlush(io.brqRedirect) && allocated(i)
    when (needCancel(i)) {
      allocated(i) := false.B
    }
  }

  /**
    * update pointers
    */
  val lastEnqCancel = PopCount(RegNext(VecInit(canEnqueue.zip(enqCancel).map(x => x._1 && x._2))))
  val lastCycleCancelCount = PopCount(RegNext(needCancel))
  val enqNumber = Mux(io.enq.canAccept && io.enq.sqCanAccept, PopCount(io.enq.req.map(_.valid)), 0.U)
  when (lastCycleRedirect.valid) {
    // we recover the pointers in the next cycle after redirect
    enqPtrExt := VecInit(enqPtrExt.map(_ - (lastCycleCancelCount + lastEnqCancel)))
  }.otherwise {
    enqPtrExt := VecInit(enqPtrExt.map(_ + enqNumber))
  }
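
  // Recovery arithmetic (illustration only, not from the original source): on a
  // redirect, every queue entry killed by the flush (lastCycleCancelCount) plus
  // every in-flight enqueue that was cancelled (lastEnqCancel) is reclaimed by
  // stepping all enqueue pointers back by their sum. E.g. 3 flushed entries and
  // 1 cancelled incoming request move enqPtrExt back by 4; otherwise the
  // pointers advance by this cycle's accepted enqNumber.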

  deqPtrExtNext := deqPtrExt + commitCount
  deqPtrExt := deqPtrExtNext

  io.lqCancelCnt := RegNext(lastCycleCancelCount + lastEnqCancel)

  /**
    * misc
    */
  // perf counter
  QueuePerf(LoadQueueSize, validCount, !allowEnqueue)
  io.lqFull := !allowEnqueue
  XSPerfAccumulate("rollback", io.rollback.valid) // rollback redirect generated
  XSPerfAccumulate("mmioCycle", uncacheState =/= s_idle) // lq is busy dealing with uncache req
  XSPerfAccumulate("mmioCnt", io.uncache.req.fire())
  XSPerfAccumulate("refill", io.refill.valid)
  XSPerfAccumulate("writeback_success", PopCount(VecInit(io.ldout.map(i => i.fire()))))
  XSPerfAccumulate("writeback_blocked", PopCount(VecInit(io.ldout.map(i => i.valid && !i.ready))))
  XSPerfAccumulate("utilization_miss", PopCount((0 until LoadQueueSize).map(i => allocated(i) && miss(i))))

  val perfValidCount = RegNext(validCount)

  val perfEvents = Seq(
    ("rollback         ", io.rollback.valid),
    ("mmioCycle        ", uncacheState =/= s_idle),
    ("mmio_Cnt         ", io.uncache.req.fire()),
    ("refill           ", io.refill.valid),
    ("writeback_success", PopCount(VecInit(io.ldout.map(i => i.fire())))),
    ("writeback_blocked", PopCount(VecInit(io.ldout.map(i => i.valid && !i.ready)))),
    ("ltq_1_4_valid    ", (perfValidCount < (LoadQueueSize.U/4.U))),
    ("ltq_2_4_valid    ", (perfValidCount > (LoadQueueSize.U/4.U)) & (perfValidCount <= (LoadQueueSize.U/2.U))),
    ("ltq_3_4_valid    ", (perfValidCount > (LoadQueueSize.U/2.U)) & (perfValidCount <= (LoadQueueSize.U*3.U/4.U))),
    ("ltq_4_4_valid    ", (perfValidCount > (LoadQueueSize.U*3.U/4.U)))
  )
  generatePerfEvent()

  // debug info
  XSDebug("enqPtrExt %d:%d deqPtrExt %d:%d\n", enqPtrExt(0).flag, enqPtr, deqPtrExt.flag, deqPtr)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when(flag) {
      XSDebug(false, true.B, name)
    }.otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  for (i <- 0 until LoadQueueSize) {
    XSDebug(i + " pc %x pa %x ", uop(i).cf.pc, debug_paddr(i))
    PrintFlag(allocated(i), "a")
    PrintFlag(allocated(i) && datavalid(i), "v")
    PrintFlag(allocated(i) && writebacked(i), "w")
    PrintFlag(allocated(i) && miss(i), "m")
    PrintFlag(allocated(i) && pending(i), "p")
    XSDebug(false, true.B, "\n")
  }

}