xref: /XiangShan/src/main/scala/xiangshan/mem/lsqueue/LoadQueue.scala (revision c7658a75968a011d92bf164a1a55872e64f06d44)
package xiangshan.mem

import chisel3._
import chisel3.util._
import utils._
import xiangshan._
import xiangshan.cache._
import xiangshan.cache.{DCacheWordIO, DCacheLineIO, TlbRequestIO, MemoryOpConstants}
import xiangshan.backend.LSUOpType
import xiangshan.mem._
import xiangshan.backend.roq.RoqPtr
import xiangshan.backend.fu.fpu.boxF32ToF64


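// LqPtr is a circular queue pointer: `value` indexes the queue and `flag`
// flips on each wrap-around, so equal values with equal flags mean empty
// while equal values with different flags mean full.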
class LqPtr extends CircularQueuePtr(LqPtr.LoadQueueSize) { }

object LqPtr extends HasXSParameter {
  def apply(f: Bool, v: UInt): LqPtr = {
    val ptr = Wire(new LqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}


// Load Queue
class LoadQueue extends XSModule with HasDCacheParameters with HasCircularQueuePtrHelper {
  val io = IO(new Bundle() {
    val dp1Req = Vec(RenameWidth, Flipped(DecoupledIO(new MicroOp)))
    val lqIdxs = Output(Vec(RenameWidth, new LqPtr)) // LSIdx will be assembled in LSQWrapper
    val brqRedirect = Input(Valid(new Redirect))
    val loadIn = Vec(LoadPipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle))) // FIXME: Valid() only
    val ldout = Vec(LoadPipelineWidth, DecoupledIO(new ExuOutput)) // writeback load
    val forward = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO))
    val commits = Flipped(Vec(CommitWidth, Valid(new RoqCommit)))
    val rollback = Output(Valid(new Redirect)) // replay now starts from load instead of store
    val dcache = new DCacheLineIO
    val uncache = new DCacheWordIO
    val roqDeqPtr = Input(new RoqPtr)
    val exceptionAddr = new ExceptionAddrIO
    // val refill = Flipped(Valid(new DCacheLineReq ))
  })

  val uop = Reg(Vec(LoadQueueSize, new MicroOp))
  val data = Reg(Vec(LoadQueueSize, new LsRoqEntry)) // FIXME: use LoadQueueEntry instead
  val allocated = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // lq entry has been allocated
  val valid = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // data is valid
  val writebacked = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // inst has been writebacked to CDB
  val commited = Reg(Vec(LoadQueueSize, Bool())) // inst has been committed by roq
  val miss = Reg(Vec(LoadQueueSize, Bool())) // load inst missed, waiting for miss queue to accept miss request
  val listening = Reg(Vec(LoadQueueSize, Bool())) // waiting for refill result
  val pending = Reg(Vec(LoadQueueSize, Bool())) // mmio pending: inst is an mmio inst, it will not be executed until it reaches the head of roq
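  // entry life cycle: allocated at dispatch; a hit becomes valid and
  // writebacked at once; a miss goes miss -> listening -> valid (refill)
  // and is then written back; committed entries are deallocated. An mmio
  // load instead goes pending -> uncache access -> valid -> writebacked.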

  val ringBufferHeadExtended = RegInit(0.U.asTypeOf(new LqPtr))
  val ringBufferTailExtended = RegInit(0.U.asTypeOf(new LqPtr))
  val ringBufferHead = ringBufferHeadExtended.value
  val ringBufferTail = ringBufferTailExtended.value
  val ringBufferSameFlag = ringBufferHeadExtended.flag === ringBufferTailExtended.flag
  val ringBufferEmpty = ringBufferHead === ringBufferTail && ringBufferSameFlag
  val ringBufferFull = ringBufferHead === ringBufferTail && !ringBufferSameFlag
  val ringBufferAllowin = !ringBufferFull

  val loadCommit = (0 until CommitWidth).map(i => io.commits(i).valid && !io.commits(i).bits.isWalk && io.commits(i).bits.uop.ctrl.commitType === CommitType.LOAD)
  val mcommitIdx = (0 until CommitWidth).map(i => io.commits(i).bits.uop.lqIdx.value)

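  // (1 << ptr) - 1 masks all entries below ptr; XOR-ing the head and tail
  // masks marks the entries in between, e.g. with 8 entries, tail = 2 and
  // head = 5 give 0b00011100. When the flags differ the window has wrapped,
  // so the complement is taken instead.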
  val tailMask = (((1.U((LoadQueueSize + 1).W)) << ringBufferTail).asUInt - 1.U)(LoadQueueSize - 1, 0)
  val headMask = (((1.U((LoadQueueSize + 1).W)) << ringBufferHead).asUInt - 1.U)(LoadQueueSize - 1, 0)
  val enqDeqMask1 = tailMask ^ headMask
  val enqDeqMask = Mux(ringBufferSameFlag, enqDeqMask1, ~enqDeqMask1)

  // TODO: misc arbiter

  // Enqueue at dispatch
  val emptyEntries = LoadQueueSize.U - distanceBetween(ringBufferHeadExtended, ringBufferTailExtended)
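  // dispatch slot i is allocated at head + (number of valid requests in
  // earlier slots); it is ready only if all requests up to and including
  // itself fit into the remaining free entries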
  XSDebug("(ready, valid): ")
  for (i <- 0 until RenameWidth) {
    val offset = if (i == 0) 0.U else PopCount((0 until i).map(io.dp1Req(_).valid))
    val lqIdx = ringBufferHeadExtended + offset
    val index = lqIdx.value
    when(io.dp1Req(i).fire()) {
      uop(index) := io.dp1Req(i).bits
      allocated(index) := true.B
      valid(index) := false.B
      writebacked(index) := false.B
      commited(index) := false.B
      miss(index) := false.B
      listening(index) := false.B
      pending(index) := false.B
      // data(index).bwdMask := 0.U(8.W).asBools
    }
    val numTryEnqueue = offset +& io.dp1Req(i).valid
    io.dp1Req(i).ready := numTryEnqueue <= emptyEntries
    io.lqIdxs(i) := lqIdx
    XSDebug(false, true.B, "(%d, %d) ", io.dp1Req(i).ready, io.dp1Req(i).valid)
  }
  XSDebug(false, true.B, "\n")

  val firedDispatch = VecInit(io.dp1Req.map(_.fire())).asUInt
  when(firedDispatch.orR) {
    ringBufferHeadExtended := ringBufferHeadExtended + PopCount(firedDispatch)
    XSInfo("dispatched %d insts to lq\n", PopCount(firedDispatch))
  }

  // writeback load
  (0 until LoadPipelineWidth).map(i => {
    when(io.loadIn(i).fire()) {
      when(io.loadIn(i).bits.miss) {
        XSInfo(io.loadIn(i).valid, "load miss write to lq idx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x roll %x exc %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio,
          io.loadIn(i).bits.rollback,
          io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
        )
      }.otherwise {
        XSInfo(io.loadIn(i).valid, "load hit write to cdb lqidx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x roll %x exc %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio,
          io.loadIn(i).bits.rollback,
          io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
        )
      }
      val loadWbIndex = io.loadIn(i).bits.uop.lqIdx.value
      valid(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      writebacked(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      // allocated(loadWbIndex) := io.loadIn(i).bits.miss // if hit, lq entry can be recycled
      data(loadWbIndex).paddr := io.loadIn(i).bits.paddr
      data(loadWbIndex).vaddr := io.loadIn(i).bits.vaddr
      data(loadWbIndex).mask := io.loadIn(i).bits.mask
      data(loadWbIndex).data := io.loadIn(i).bits.data // for mmio / misc / debug
      data(loadWbIndex).mmio := io.loadIn(i).bits.mmio
      data(loadWbIndex).fwdMask := io.loadIn(i).bits.forwardMask
      data(loadWbIndex).fwdData := io.loadIn(i).bits.forwardData
      data(loadWbIndex).exception := io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
      val dcacheMissed = io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      miss(loadWbIndex) := dcacheMissed
      listening(loadWbIndex) := dcacheMissed
      pending(loadWbIndex) := io.loadIn(i).bits.mmio
    }
  })

  // cache miss request
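  // inflightReqs tracks cache blocks with an outstanding refill request, so
  // several missed loads to the same block generate only one dcache request;
  // every entry listening on that block is woken by the single refill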
  val inflightReqs = RegInit(VecInit(Seq.fill(cfg.nLoadMissEntries)(0.U.asTypeOf(new InflightBlockInfo))))
  val inflightReqFull = inflightReqs.map(req => req.valid).reduce(_&&_)
  val reqBlockIndex = PriorityEncoder(~VecInit(inflightReqs.map(req => req.valid)).asUInt)

  val missRefillSelVec = VecInit(
    (0 until LoadQueueSize).map{ i =>
      val inflight = inflightReqs.map(req => req.valid && req.block_addr === get_block_addr(data(i).paddr)).reduce(_||_)
      allocated(i) && miss(i) && !inflight
    })

  val missRefillSel = getFirstOne(missRefillSelVec, tailMask)
  val missRefillBlockAddr = get_block_addr(data(missRefillSel).paddr)
  io.dcache.req.valid := missRefillSelVec.asUInt.orR
  io.dcache.req.bits.cmd := MemoryOpConstants.M_XRD
  io.dcache.req.bits.addr := missRefillBlockAddr
  io.dcache.req.bits.data := DontCare
  io.dcache.req.bits.mask := DontCare

  io.dcache.req.bits.meta.id       := DontCare // TODO: // FIXME
  io.dcache.req.bits.meta.vaddr    := DontCare // data(missRefillSel).vaddr
  io.dcache.req.bits.meta.paddr    := missRefillBlockAddr
  io.dcache.req.bits.meta.uop      := uop(missRefillSel)
  io.dcache.req.bits.meta.mmio     := false.B // data(missRefillSel).mmio
  io.dcache.req.bits.meta.tlb_miss := false.B
  io.dcache.req.bits.meta.mask     := DontCare
  io.dcache.req.bits.meta.replay   := false.B

  io.dcache.resp.ready := true.B

  assert(!(data(missRefillSel).mmio && io.dcache.req.valid))

  when(io.dcache.req.fire()) {
    miss(missRefillSel) := false.B
    listening(missRefillSel) := true.B

    // mark this block as inflight
    inflightReqs(reqBlockIndex).valid := true.B
    inflightReqs(reqBlockIndex).block_addr := missRefillBlockAddr
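    // register updates take effect at the next clock edge, so this assert
    // still reads the old value and checks that the chosen slot was free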
    assert(!inflightReqs(reqBlockIndex).valid)
  }

  when(io.dcache.resp.fire()) {
    val inflight = inflightReqs.map(req => req.valid && req.block_addr === get_block_addr(io.dcache.resp.bits.meta.paddr)).reduce(_||_)
    assert(inflight)
    for (i <- 0 until cfg.nLoadMissEntries) {
      when (inflightReqs(i).valid && inflightReqs(i).block_addr === get_block_addr(io.dcache.resp.bits.meta.paddr)) {
        inflightReqs(i).valid := false.B
      }
    }
  }


  when(io.dcache.req.fire()){
    XSDebug("miss req: pc:0x%x roqIdx:%d lqIdx:%d (p)addr:0x%x vaddr:0x%x\n",
      io.dcache.req.bits.meta.uop.cf.pc, io.dcache.req.bits.meta.uop.roqIdx.asUInt, io.dcache.req.bits.meta.uop.lqIdx.asUInt,
      io.dcache.req.bits.addr, io.dcache.req.bits.meta.vaddr
    )
  }

  when(io.dcache.resp.fire()){
    XSDebug("miss resp: pc:0x%x roqIdx:%d lqIdx:%d (p)addr:0x%x data %x\n",
      io.dcache.resp.bits.meta.uop.cf.pc, io.dcache.resp.bits.meta.uop.roqIdx.asUInt, io.dcache.resp.bits.meta.uop.lqIdx.asUInt,
      io.dcache.resp.bits.meta.paddr, io.dcache.resp.bits.data
    )
  }

  // Refill 64 bit in a cycle
  // Refill data comes back from io.dcache.resp
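  // e.g. fwdMask = "b00001111".U takes the low four bytes from the
  // store-forwarded data and the high four bytes from the refill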
  def mergeRefillData(refill: UInt, fwd: UInt, fwdMask: UInt): UInt = {
    val res = Wire(Vec(8, UInt(8.W)))
    (0 until 8).foreach(i => {
      res(i) := Mux(fwdMask(i), fwd(8 * (i + 1) - 1, 8 * i), refill(8 * (i + 1) - 1, 8 * i))
    })
    res.asUInt
  }

  (0 until LoadQueueSize).map(i => {
    val blockMatch = get_block_addr(data(i).paddr) === io.dcache.resp.bits.meta.paddr
    when(allocated(i) && listening(i) && blockMatch && io.dcache.resp.fire()) {
      // split them into words
      val words = VecInit((0 until blockWords) map { w =>
        io.dcache.resp.bits.data(DataBits * (w + 1) - 1, DataBits * w)
      })

      val refillData = words(get_word(data(i).paddr))
      data(i).data := mergeRefillData(refillData, data(i).fwdData.asUInt, data(i).fwdMask.asUInt)
      valid(i) := true.B
      listening(i) := false.B
      XSDebug("miss resp: pos %d addr %x data %x + %x(%b)\n", i.U, data(i).paddr, refillData, data(i).fwdData.asUInt, data(i).fwdMask.asUInt)
    }
  })

  // writeback up to 2 missed load insts to CDB
  // arbitrarily pick 2 refilled loads (lowest index first) and write them back to the CDB
  val loadWbSelVec = VecInit((0 until LoadQueueSize).map(i => {
    allocated(i) && valid(i) && !writebacked(i)
  })).asUInt() // use UInt instead of Vec to reduce Verilog lines
  val loadWbSel = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W)))
  val loadWbSelV = Wire(Vec(LoadPipelineWidth, Bool()))
  val lselvec0 = PriorityEncoderOH(loadWbSelVec)
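  // the second port priority-encodes the remaining requests with the first
  // selection masked out, so the two ports never pick the same entry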
  val lselvec1 = PriorityEncoderOH(loadWbSelVec & (~lselvec0).asUInt)
  loadWbSel(0) := OHToUInt(lselvec0)
  loadWbSelV(0) := lselvec0.orR
  loadWbSel(1) := OHToUInt(lselvec1)
  loadWbSelV(1) := lselvec1.orR
  (0 until LoadPipelineWidth).map(i => {
    // data select
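    // shift the 64-bit word right by the byte offset, then extend: e.g. an
    // lh with raddr(2, 0) = "b010".U selects rdata(63, 16) and sign-extends
    // its low 16 bits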
    val rdata = data(loadWbSel(i)).data
    val func = uop(loadWbSel(i)).ctrl.fuOpType
    val raddr = data(loadWbSel(i)).paddr
    val rdataSel = LookupTree(raddr(2, 0), List(
      "b000".U -> rdata(63, 0),
      "b001".U -> rdata(63, 8),
      "b010".U -> rdata(63, 16),
      "b011".U -> rdata(63, 24),
      "b100".U -> rdata(63, 32),
      "b101".U -> rdata(63, 40),
      "b110".U -> rdata(63, 48),
      "b111".U -> rdata(63, 56)
    ))
    val rdataPartialLoad = LookupTree(func, List(
        LSUOpType.lb   -> SignExt(rdataSel(7, 0) , XLEN),
        LSUOpType.lh   -> SignExt(rdataSel(15, 0), XLEN),
        LSUOpType.lw   -> SignExt(rdataSel(31, 0), XLEN),
        LSUOpType.ld   -> SignExt(rdataSel(63, 0), XLEN),
        LSUOpType.lbu  -> ZeroExt(rdataSel(7, 0) , XLEN),
        LSUOpType.lhu  -> ZeroExt(rdataSel(15, 0), XLEN),
        LSUOpType.lwu  -> ZeroExt(rdataSel(31, 0), XLEN),
        LSUOpType.flw  -> boxF32ToF64(rdataSel(31, 0))
    ))
    io.ldout(i).bits.uop := uop(loadWbSel(i))
    io.ldout(i).bits.uop.cf.exceptionVec := data(loadWbSel(i)).exception.asBools
    io.ldout(i).bits.uop.lqIdx := loadWbSel(i).asTypeOf(new LqPtr)
    io.ldout(i).bits.data := rdataPartialLoad
    io.ldout(i).bits.redirectValid := false.B
    io.ldout(i).bits.redirect := DontCare
    io.ldout(i).bits.brUpdate := DontCare
    io.ldout(i).bits.debug.isMMIO := data(loadWbSel(i)).mmio
    io.ldout(i).bits.fflags := DontCare
    io.ldout(i).valid := loadWbSelVec(loadWbSel(i)) && loadWbSelV(i)
    when(io.ldout(i).fire()) {
      writebacked(loadWbSel(i)) := true.B
      XSInfo("load miss write to cdb roqidx %d lqidx %d pc 0x%x paddr %x data %x mmio %x\n",
        io.ldout(i).bits.uop.roqIdx.asUInt,
        io.ldout(i).bits.uop.lqIdx.asUInt,
        io.ldout(i).bits.uop.cf.pc,
        data(loadWbSel(i)).paddr,
        data(loadWbSel(i)).data,
        data(loadWbSel(i)).mmio
      )
    }
  })

  // move tailPtr
  // allocatedMask: the dequeue pointer can advance to the next 1 bit (a still-allocated entry or one outside the enq-deq window)
  val allocatedMask = VecInit((0 until LoadQueueSize).map(i => allocated(i) || !enqDeqMask(i)))
  // find the first one from deqPtr (ringBufferTail)
  val nextTail1 = getFirstOneWithFlag(allocatedMask, tailMask, ringBufferTailExtended.flag)
  val nextTail = Mux(Cat(allocatedMask).orR, nextTail1, ringBufferHeadExtended)
  ringBufferTailExtended := nextTail

  // When a load is committed, mark it as !allocated; the entry will be recycled later
  (0 until CommitWidth).map(i => {
    when(loadCommit(i)) {
      allocated(mcommitIdx(i)) := false.B
      XSDebug("load commit %d: idx %d %x\n", i.U, mcommitIdx(i), uop(mcommitIdx(i)).cf.pc)
    }
  })

  // rollback check
  val rollback = Wire(Vec(StorePipelineWidth, Valid(new Redirect)))

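  // getFirstOne: bits not covered by startMask lie at or beyond the start
  // pointer and take priority; if none is set, the search wraps around to
  // the low-order bits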
  def getFirstOne(mask: Vec[Bool], startMask: UInt) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    PriorityEncoder(Mux(highBitsUint.orR(), highBitsUint, mask.asUInt))
  }

  def getFirstOneWithFlag(mask: Vec[Bool], startMask: UInt, startFlag: Bool) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    val changeDirection = !highBitsUint.orR()
    val index = PriorityEncoder(Mux(!changeDirection, highBitsUint, mask.asUInt))
    LqPtr(startFlag ^ changeDirection, index)
  }

  def getOldestInTwo(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    assert(valid.length == 2)
    Mux(valid(0) && valid(1),
      Mux(isAfter(uop(0).roqIdx, uop(1).roqIdx), uop(1), uop(0)),
      Mux(valid(0) && !valid(1), uop(0), uop(1)))
  }

  def getAfterMask(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    val length = valid.length
    (0 until length).map(i => {
      (0 until length).map(j => {
        Mux(valid(i) && valid(j),
          isAfter(uop(i).roqIdx, uop(j).roqIdx),
          Mux(!valid(i), true.B, false.B))
      })
    })
  }

  def rangeMask(start: LqPtr, end: LqPtr): UInt = {
    val startMask = (1.U((LoadQueueSize + 1).W) << start.value).asUInt - 1.U
    val endMask = (1.U((LoadQueueSize + 1).W) << end.value).asUInt - 1.U
    val xorMask = startMask(LoadQueueSize - 1, 0) ^ endMask(LoadQueueSize - 1, 0)
    Mux(start.flag === end.flag, xorMask, ~xorMask)
  }

  // ignore data forward
  (0 until LoadPipelineWidth).foreach(i => {
    io.forward(i).forwardMask := DontCare
    io.forward(i).forwardData := DontCare
  })

  // store backward query and rollback
  //  val needCheck = Seq.fill(8)(WireInit(true.B))
  (0 until StorePipelineWidth).foreach(i => {
    rollback(i) := DontCare

    when(io.storeIn(i).valid) {
      val startIndex = io.storeIn(i).bits.uop.lqIdx.value
      val lqIdxMask = ((1.U((LoadQueueSize + 1).W) << startIndex).asUInt - 1.U)(LoadQueueSize - 1, 0)
      val xorMask = lqIdxMask ^ headMask
      val sameFlag = io.storeIn(i).bits.uop.lqIdx.flag === ringBufferHeadExtended.flag
      val toEnqPtrMask = Mux(sameFlag, xorMask, ~xorMask)
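      // entries between the store's lqIdx and the enqueue pointer hold
      // loads younger than the store; only those are violation candidates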

      // check if a load already in the lq needs to be rolled back
      val lqViolationVec = VecInit((0 until LoadQueueSize).map(j => {
        val addrMatch = allocated(j) &&
          io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === data(j).paddr(PAddrBits - 1, 3)
        val entryNeedCheck = toEnqPtrMask(j) && addrMatch && (valid(j) || listening(j) || miss(j))
        // TODO: update refilled data
        val violationVec = (0 until 8).map(k => data(j).mask(k) && io.storeIn(i).bits.mask(k))
        Cat(violationVec).orR() && entryNeedCheck
      }))
      val lqViolation = lqViolationVec.asUInt().orR()
      val lqViolationIndex = getFirstOne(lqViolationVec, lqIdxMask)
      val lqViolationUop = uop(lqViolationIndex)
      XSDebug(lqViolation, p"${Binary(Cat(lqViolationVec))}, $startIndex, $lqViolationIndex\n")

      // when l/s writeback to roq together, check if rollback is needed
      val wbViolationVec = VecInit((0 until LoadPipelineWidth).map(j => {
        io.loadIn(j).valid &&
          isAfter(io.loadIn(j).bits.uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
          io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.loadIn(j).bits.paddr(PAddrBits - 1, 3) &&
          (io.storeIn(i).bits.mask & io.loadIn(j).bits.mask).orR
      }))
      val wbViolation = wbViolationVec.asUInt().orR()
      val wbViolationUop = getOldestInTwo(wbViolationVec, io.loadIn.map(_.bits.uop))
      XSDebug(wbViolation, p"${Binary(Cat(wbViolationVec))}, $wbViolationUop\n")

      // check if rollback is needed for load in l1
      val l1ViolationVec = VecInit((0 until LoadPipelineWidth).map(j => {
        io.forward(j).valid && // L4 valid
          isAfter(io.forward(j).uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
          io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.forward(j).paddr(PAddrBits - 1, 3) &&
          (io.storeIn(i).bits.mask & io.forward(j).mask).orR
      }))
      val l1Violation = l1ViolationVec.asUInt().orR()
      val l1ViolationUop = getOldestInTwo(l1ViolationVec, io.forward.map(_.uop))
      XSDebug(l1Violation, p"${Binary(Cat(l1ViolationVec))}, $l1ViolationUop\n")

      val rollbackValidVec = Seq(lqViolation, wbViolation, l1Violation)
      val rollbackUopVec = Seq(lqViolationUop, wbViolationUop, l1ViolationUop)
      rollback(i).valid := Cat(rollbackValidVec).orR
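      // pick the oldest violating uop among {lq, wb, l1}: mask(x)(y) means
      // uop(x) is after uop(y), and an invalid entry compares as after
      // everything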
      val mask = getAfterMask(rollbackValidVec, rollbackUopVec)
      val oneAfterZero = mask(1)(0)
      val rollbackUop = Mux(oneAfterZero && mask(2)(0),
        rollbackUopVec(0),
        Mux(!oneAfterZero && mask(2)(1), rollbackUopVec(1), rollbackUopVec(2)))
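      // target roqIdx - 1: the redirect flushes everything after it, so the
      // violating load itself is squashed and re-executed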
      rollback(i).bits.roqIdx := rollbackUop.roqIdx - 1.U

      rollback(i).bits.isReplay := true.B
      rollback(i).bits.isMisPred := false.B
      rollback(i).bits.isException := false.B
      rollback(i).bits.isFlushPipe := false.B

      XSDebug(
        l1Violation,
        "need rollback (l4 load) pc %x roqidx %d target %x\n",
        io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, l1ViolationUop.roqIdx.asUInt
      )
      XSDebug(
        lqViolation,
        "need rollback (ld wb before store) pc %x roqidx %d target %x\n",
        io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, lqViolationUop.roqIdx.asUInt
      )
      XSDebug(
        wbViolation,
        "need rollback (ld/st wb together) pc %x roqidx %d target %x\n",
        io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, wbViolationUop.roqIdx.asUInt
      )
    }.otherwise {
      rollback(i).valid := false.B
    }
  })

  def rollbackSel(a: Valid[Redirect], b: Valid[Redirect]): ValidIO[Redirect] = {
    Mux(
      a.valid,
      Mux(
        b.valid,
        Mux(isAfter(a.bits.roqIdx, b.bits.roqIdx), b, a), // a,b both valid, sel oldest
        a // sel a
      ),
      b // sel b
    )
  }

  io.rollback := ParallelOperation(rollback, rollbackSel)

  // Memory mapped IO / other uncached operations

  // setup misc mem access req
  // mask / paddr / data can be read from lq.data
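  // an mmio load is held in pending until it reaches the head of the roq,
  // so the side-effecting uncached access is issued exactly once and never
  // speculatively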
  val commitType = io.commits(0).bits.uop.ctrl.commitType
  io.uncache.req.valid := pending(ringBufferTail) && allocated(ringBufferTail) &&
    commitType === CommitType.LOAD &&
    io.roqDeqPtr === uop(ringBufferTail).roqIdx &&
    !io.commits(0).bits.isWalk

  io.uncache.req.bits.cmd  := MemoryOpConstants.M_XRD
  io.uncache.req.bits.addr := data(ringBufferTail).paddr
  io.uncache.req.bits.data := data(ringBufferTail).data
  io.uncache.req.bits.mask := data(ringBufferTail).mask

  io.uncache.req.bits.meta.id       := DontCare // TODO: // FIXME
  io.uncache.req.bits.meta.vaddr    := DontCare
  io.uncache.req.bits.meta.paddr    := data(ringBufferTail).paddr
  io.uncache.req.bits.meta.uop      := uop(ringBufferTail)
  io.uncache.req.bits.meta.mmio     := true.B // data(ringBufferTail).mmio
  io.uncache.req.bits.meta.tlb_miss := false.B
  io.uncache.req.bits.meta.mask     := data(ringBufferTail).mask
  io.uncache.req.bits.meta.replay   := false.B

  io.uncache.resp.ready := true.B

  when(io.uncache.req.fire()){
    pending(ringBufferTail) := false.B
  }

  when(io.uncache.resp.fire()){
    valid(ringBufferTail) := true.B
    data(ringBufferTail).data := io.uncache.resp.bits.data(XLEN-1, 0)
    // TODO: write back exception info
  }

  when(io.uncache.req.fire()){
    XSDebug("uncache req: pc %x addr %x data %x op %x mask %x\n",
      uop(ringBufferTail).cf.pc,
      io.uncache.req.bits.addr,
      io.uncache.req.bits.data,
      io.uncache.req.bits.cmd,
      io.uncache.req.bits.mask
    )
  }

  when(io.uncache.resp.fire()){
    XSDebug("uncache resp: data %x\n", io.uncache.resp.bits.data)
  }

  // Read vaddr for mem exception
  io.exceptionAddr.vaddr := data(io.exceptionAddr.lsIdx.lqIdx.value).vaddr

  // misprediction recovery / exception redirect
  // invalidate lq entries using roqIdx
  val needCancel = Wire(Vec(LoadQueueSize, Bool()))
  for (i <- 0 until LoadQueueSize) {
    needCancel(i) := uop(i).roqIdx.needFlush(io.brqRedirect) && allocated(i) && !commited(i)
    when(needCancel(i)) {
      when(io.brqRedirect.bits.isReplay){
        valid(i) := false.B
        writebacked(i) := false.B
        listening(i) := false.B
        miss(i) := false.B
        pending(i) := false.B
      }.otherwise{
        allocated(i) := false.B
      }
    }
  }
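  // on a mispredict the enqueue pointer steps back over the cancelled
  // entries; a replay keeps them allocated and only clears their progress bits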
  when (io.brqRedirect.valid && io.brqRedirect.bits.isMisPred) {
    ringBufferHeadExtended := ringBufferHeadExtended - PopCount(needCancel)
  }

  // assert(!io.rollback.valid)
  when(io.rollback.valid) {
    XSDebug("Mem rollback: pc %x roqidx %d\n", io.rollback.bits.pc, io.rollback.bits.roqIdx.asUInt)
  }

  // debug info
  XSDebug("head %d:%d tail %d:%d\n", ringBufferHeadExtended.flag, ringBufferHead, ringBufferTailExtended.flag, ringBufferTail)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when(flag) {
      XSDebug(false, true.B, name)
    }.otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  for (i <- 0 until LoadQueueSize) {
    if (i % 4 == 0) XSDebug("")
    XSDebug(false, true.B, "%x [%x] ", uop(i).cf.pc, data(i).paddr)
    PrintFlag(allocated(i), "a")
    PrintFlag(allocated(i) && valid(i), "v")
    PrintFlag(allocated(i) && writebacked(i), "w")
    PrintFlag(allocated(i) && commited(i), "c")
    PrintFlag(allocated(i) && miss(i), "m")
    PrintFlag(allocated(i) && listening(i), "l")
    PrintFlag(allocated(i) && pending(i), "p")
    XSDebug(false, true.B, " ")
    if (i % 4 == 3 || i == LoadQueueSize - 1) XSDebug(false, true.B, "\n")
  }

}