// xref: /XiangShan/src/main/scala/xiangshan/mem/lsqueue/LoadQueue.scala (revision 579b9f28762b9000ab852a29357f1dcc0e1636a5)
package xiangshan.mem

import chisel3._
import chisel3.util._
import freechips.rocketchip.tile.HasFPUParameters
import utils._
import xiangshan._
import xiangshan.cache._
import xiangshan.cache.{DCacheLineIO, DCacheWordIO, MemoryOpConstants, TlbRequestIO}
import xiangshan.backend.LSUOpType
import xiangshan.mem._
import xiangshan.backend.roq.RoqPtr
import xiangshan.backend.fu.fpu.boxF32ToF64


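// A load queue pointer is a circular-queue pointer: `value` indexes an entry
// and `flag` flips on every wrap-around, so that full and empty can be told
// apart when the enqueue and dequeue pointers hold the same value.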
class LqPtr extends CircularQueuePtr(LqPtr.LoadQueueSize) { }

object LqPtr extends HasXSParameter {
  def apply(f: Bool, v: UInt): LqPtr = {
    val ptr = Wire(new LqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}

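// rdataHelper turns the raw 64-bit load result into the value written back
// to the register file: integer loads are sign-/zero-extended to XLEN, while
// fpWen loads are recoded (via recode) for the FP register file.
// e.g. lb with rdata(7, 0) = 0x80 yields 0xffff_ffff_ffff_ff80.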
trait HasLoadHelper { this: XSModule =>
  def rdataHelper(uop: MicroOp, rdata: UInt): UInt = {
    val lwIntData = SignExt(rdata(31, 0), XLEN)
    val ldIntData = SignExt(rdata(63, 0), XLEN)
    val lwFpData = recode(rdata(31, 0), S)
    val ldFpData = recode(rdata(63, 0), D)
    val fpWen = uop.ctrl.fpWen
    LookupTree(uop.ctrl.fuOpType, List(
      LSUOpType.lb   -> SignExt(rdata(7, 0) , XLEN),
      LSUOpType.lh   -> SignExt(rdata(15, 0), XLEN),
      LSUOpType.lw   -> Mux(fpWen, lwFpData, lwIntData),
      LSUOpType.ld   -> Mux(fpWen, ldFpData, ldIntData),
      LSUOpType.lbu  -> ZeroExt(rdata(7, 0) , XLEN),
      LSUOpType.lhu  -> ZeroExt(rdata(15, 0), XLEN),
      LSUOpType.lwu  -> ZeroExt(rdata(31, 0), XLEN)
    ))
  }
}


// Load Queue
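// The load queue tracks every in-flight load from dispatch to commit: it
// buffers load results for writeback, issues refill requests for dcache
// misses, detects store-to-load ordering violations (rollback), and
// serializes MMIO loads until they reach the head of the roq.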
class LoadQueue extends XSModule
  with HasDCacheParameters
  with HasCircularQueuePtrHelper
  with HasLoadHelper
{
  val io = IO(new Bundle() {
    val enq = new Bundle() {
      val canAccept = Output(Bool())
      val req = Vec(RenameWidth, Flipped(ValidIO(new MicroOp)))
      val resp = Vec(RenameWidth, Output(new LqPtr))
    }
    val brqRedirect = Input(Valid(new Redirect))
    val loadIn = Vec(LoadPipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle))) // FIXME: Valid() only
    val ldout = Vec(2, DecoupledIO(new ExuOutput)) // writeback load
    val forward = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO))
    val commits = Flipped(Vec(CommitWidth, Valid(new RoqCommit)))
    val rollback = Output(Valid(new Redirect)) // replay now starts from load instead of store
    val dcache = new DCacheLineIO
    val uncache = new DCacheWordIO
    val roqDeqPtr = Input(new RoqPtr)
    val exceptionAddr = new ExceptionAddrIO
    // val refill = Flipped(Valid(new DCacheLineReq ))
  })

  val uop = Reg(Vec(LoadQueueSize, new MicroOp))
  // val data = Reg(Vec(LoadQueueSize, new LsRoqEntry))
  val dataModule = Module(new LSQueueData(LoadQueueSize, LoadPipelineWidth))
  dataModule.io := DontCare
  val allocated = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // lq entry has been allocated
  val datavalid = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // data is valid
  val writebacked = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // inst has been written back to CDB
  val commited = Reg(Vec(LoadQueueSize, Bool())) // inst has been committed by the roq
  val miss = Reg(Vec(LoadQueueSize, Bool())) // load inst missed, waiting for miss queue to accept miss request
  val listening = Reg(Vec(LoadQueueSize, Bool())) // waiting for refill result
  val pending = Reg(Vec(LoadQueueSize, Bool())) // mmio pending: inst is an mmio inst, it will not be executed until it reaches the end of roq
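  // Entry lifecycle: allocated at dispatch -> datavalid once data arrives
  // (hit, refill or uncache response) -> writebacked after the result is
  // sent to the CDB -> deallocated when the roq commits the load (or on a
  // flush).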

  val enqPtrExt = RegInit(0.U.asTypeOf(new LqPtr))
  val deqPtrExt = RegInit(0.U.asTypeOf(new LqPtr))
  val enqPtr = enqPtrExt.value
  val deqPtr = deqPtrExt.value
  val sameFlag = enqPtrExt.flag === deqPtrExt.flag
  val isEmpty = enqPtr === deqPtr && sameFlag
  val isFull = enqPtr === deqPtr && !sameFlag
  val allowIn = !isFull

  val loadCommit = (0 until CommitWidth).map(i => io.commits(i).valid && !io.commits(i).bits.isWalk && io.commits(i).bits.uop.ctrl.commitType === CommitType.LOAD)
  val mcommitIdx = (0 until CommitWidth).map(i => io.commits(i).bits.uop.lqIdx.value)

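  // tailMask/headMask are (1 << ptr) - 1, i.e. all entry positions below the
  // pointer. Their XOR selects the entries between deqPtr and enqPtr; when
  // the two flags differ the valid region wraps around, hence the negation.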
  val tailMask = (((1.U((LoadQueueSize + 1).W)) << deqPtr).asUInt - 1.U)(LoadQueueSize - 1, 0)
  val headMask = (((1.U((LoadQueueSize + 1).W)) << enqPtr).asUInt - 1.U)(LoadQueueSize - 1, 0)
  val enqDeqMask1 = tailMask ^ headMask
  val enqDeqMask = Mux(sameFlag, enqDeqMask1, ~enqDeqMask1)

  // Enqueue at dispatch
  val validEntries = distanceBetween(enqPtrExt, deqPtrExt)
  val firedDispatch = io.enq.req.map(_.valid)
  io.enq.canAccept := validEntries <= (LoadQueueSize - RenameWidth).U
  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(firedDispatch))}\n")
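  // Each valid request gets the next free slot: offset counts how many
  // requests before it fired this cycle, so indices are handed out in order.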
  for (i <- 0 until RenameWidth) {
    val offset = if (i == 0) 0.U else PopCount((0 until i).map(firedDispatch(_)))
    val lqIdx = enqPtrExt + offset
    val index = lqIdx.value
    when(io.enq.req(i).valid) {
      uop(index) := io.enq.req(i).bits
      allocated(index) := true.B
      datavalid(index) := false.B
      writebacked(index) := false.B
      commited(index) := false.B
      miss(index) := false.B
      listening(index) := false.B
      pending(index) := false.B
    }
    io.enq.resp(i) := lqIdx

    XSError(!io.enq.canAccept && io.enq.req(i).valid, "should not be valid when not ready\n")
  }

  when(Cat(firedDispatch).orR) {
    enqPtrExt := enqPtrExt + PopCount(firedDispatch)
    XSInfo("dispatched %d insts to lq\n", PopCount(firedDispatch))
  }

  // writeback load
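  // Three outcomes when a load pipeline writes into the queue:
  //   hit:  data is already valid and can be written back immediately
  //   miss: wait for the refill (miss and listening are set)
  //   mmio: hold the load until it reaches the head of the roq (pending)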
  (0 until LoadPipelineWidth).map(i => {
    dataModule.io.wb(i).wen := false.B
    when(io.loadIn(i).fire()) {
      when(io.loadIn(i).bits.miss) {
        XSInfo(io.loadIn(i).valid, "load miss write to lq idx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x roll %x exc %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio,
          io.loadIn(i).bits.rollback,
          io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
        )
      }.otherwise {
        XSInfo(io.loadIn(i).valid, "load hit write to cdb lqidx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x roll %x exc %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio,
          io.loadIn(i).bits.rollback,
          io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
        )
      }
      val loadWbIndex = io.loadIn(i).bits.uop.lqIdx.value
      datavalid(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      writebacked(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      // a load that raised an exception releases its queue entry immediately
      allocated(loadWbIndex) := !io.loadIn(i).bits.uop.cf.exceptionVec.asUInt.orR

      val loadWbData = Wire(new LsqEntry)
      loadWbData.paddr := io.loadIn(i).bits.paddr
      loadWbData.vaddr := io.loadIn(i).bits.vaddr
      loadWbData.mask := io.loadIn(i).bits.mask
      loadWbData.data := io.loadIn(i).bits.data // for mmio / misc / debug
      loadWbData.mmio := io.loadIn(i).bits.mmio
      loadWbData.fwdMask := io.loadIn(i).bits.forwardMask
      loadWbData.fwdData := io.loadIn(i).bits.forwardData
      loadWbData.exception := io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
      dataModule.io.wbWrite(i, loadWbIndex, loadWbData)
      dataModule.io.wb(i).wen := true.B

      val dcacheMissed = io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      miss(loadWbIndex) := dcacheMissed
      listening(loadWbIndex) := dcacheMissed
      pending(loadWbIndex) := io.loadIn(i).bits.mmio
    }
  })

  // cache miss request
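  // A small table of in-flight miss requests deduplicates refills: at most
  // one dcache request is issued per cache block, and later loads to the
  // same block simply listen for that refill instead of issuing their own.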
  val inflightReqs = RegInit(VecInit(Seq.fill(cfg.nLoadMissEntries)(0.U.asTypeOf(new InflightBlockInfo))))
  val inflightReqFull = inflightReqs.map(req => req.valid).reduce(_&&_)
  val reqBlockIndex = PriorityEncoder(~VecInit(inflightReqs.map(req => req.valid)).asUInt)

  val missRefillSelVec = VecInit(
    (0 until LoadQueueSize).map{ i =>
      val inflight = inflightReqs.map(req => req.valid && req.block_addr === get_block_addr(dataModule.io.rdata(i).paddr)).reduce(_||_)
      allocated(i) && miss(i) && !inflight
    })

  val missRefillSel = getFirstOne(missRefillSelVec, tailMask)
  val missRefillBlockAddr = get_block_addr(dataModule.io.rdata(missRefillSel).paddr)
  io.dcache.req.valid := missRefillSelVec.asUInt.orR
  io.dcache.req.bits.cmd := MemoryOpConstants.M_XRD
  io.dcache.req.bits.addr := missRefillBlockAddr
  io.dcache.req.bits.data := DontCare
  io.dcache.req.bits.mask := DontCare

  io.dcache.req.bits.meta.id       := DontCare
  io.dcache.req.bits.meta.vaddr    := DontCare // dataModule.io.rdata(missRefillSel).vaddr
  io.dcache.req.bits.meta.paddr    := missRefillBlockAddr
  io.dcache.req.bits.meta.uop      := uop(missRefillSel)
  io.dcache.req.bits.meta.mmio     := false.B // dataModule.io.rdata(missRefillSel).mmio
  io.dcache.req.bits.meta.tlb_miss := false.B
  io.dcache.req.bits.meta.mask     := DontCare
  io.dcache.req.bits.meta.replay   := false.B

  io.dcache.resp.ready := true.B

  assert(!(dataModule.io.rdata(missRefillSel).mmio && io.dcache.req.valid))

  when(io.dcache.req.fire()) {
    miss(missRefillSel) := false.B
    listening(missRefillSel) := true.B

    // mark this block as inflight
    inflightReqs(reqBlockIndex).valid := true.B
    inflightReqs(reqBlockIndex).block_addr := missRefillBlockAddr
    // the := above takes effect next cycle, so this checks the slot was free
    assert(!inflightReqs(reqBlockIndex).valid)
  }

  when(io.dcache.resp.fire()) {
    val inflight = inflightReqs.map(req => req.valid && req.block_addr === get_block_addr(io.dcache.resp.bits.meta.paddr)).reduce(_||_)
    assert(inflight)
    for (i <- 0 until cfg.nLoadMissEntries) {
      when (inflightReqs(i).valid && inflightReqs(i).block_addr === get_block_addr(io.dcache.resp.bits.meta.paddr)) {
        inflightReqs(i).valid := false.B
      }
    }
  }


  when(io.dcache.req.fire()){
    XSDebug("miss req: pc:0x%x roqIdx:%d lqIdx:%d (p)addr:0x%x vaddr:0x%x\n",
      io.dcache.req.bits.meta.uop.cf.pc, io.dcache.req.bits.meta.uop.roqIdx.asUInt, io.dcache.req.bits.meta.uop.lqIdx.asUInt,
      io.dcache.req.bits.addr, io.dcache.req.bits.meta.vaddr
    )
  }

  when(io.dcache.resp.fire()){
    XSDebug("miss resp: pc:0x%x roqIdx:%d lqIdx:%d (p)addr:0x%x data %x\n",
      io.dcache.resp.bits.meta.uop.cf.pc, io.dcache.resp.bits.meta.uop.roqIdx.asUInt, io.dcache.resp.bits.meta.uop.lqIdx.asUInt,
      io.dcache.resp.bits.meta.paddr, io.dcache.resp.bits.data
    )
  }

  // Refill 64 bit in a cycle
  // Refill data comes back from io.dcache.resp
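  // Every queue entry snoops the refill: an entry that is listening and
  // whose physical address falls in the refilled block captures the data
  // and becomes ready for writeback.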
  dataModule.io.refill.dcache := io.dcache.resp.bits

  (0 until LoadQueueSize).map(i => {
    val blockMatch = get_block_addr(dataModule.io.rdata(i).paddr) === io.dcache.resp.bits.meta.paddr
    dataModule.io.refill.wen(i) := false.B
    when(allocated(i) && listening(i) && blockMatch && io.dcache.resp.fire()) {
      dataModule.io.refill.wen(i) := true.B
      datavalid(i) := true.B
      listening(i) := false.B
    }
  })

  // writeback up to 2 missed load insts to CDB
  // pick the two lowest-indexed ready loads (data refilled) and write them back to the CDB
  val loadWbSelVec = VecInit((0 until LoadQueueSize).map(i => {
    allocated(i) && datavalid(i) && !writebacked(i)
  })).asUInt() // use uint instead vec to reduce verilog lines
  val loadWbSel = Wire(Vec(StorePipelineWidth, UInt(log2Up(LoadQueueSize).W)))
  val loadWbSelV = Wire(Vec(StorePipelineWidth, Bool()))
  val lselvec0 = PriorityEncoderOH(loadWbSelVec)
  val lselvec1 = PriorityEncoderOH(loadWbSelVec & (~lselvec0).asUInt)
  loadWbSel(0) := OHToUInt(lselvec0)
  loadWbSelV(0) := lselvec0.orR
  loadWbSel(1) := OHToUInt(lselvec1)
  loadWbSelV(1) := lselvec1.orR
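  // Two-way selection: the first priority encoder picks the lowest ready
  // index; the second picks the next one after masking the first choice out.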
  (0 until StorePipelineWidth).map(i => {
    // data select
    val rdata = dataModule.io.rdata(loadWbSel(i)).data
    val func = uop(loadWbSel(i)).ctrl.fuOpType
    val raddr = dataModule.io.rdata(loadWbSel(i)).paddr
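    // shift the stored 64-bit word right by the byte offset paddr(2, 0) so
    // the accessed bytes start at bit 0 before sign/zero extension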
    val rdataSel = LookupTree(raddr(2, 0), List(
      "b000".U -> rdata(63, 0),
      "b001".U -> rdata(63, 8),
      "b010".U -> rdata(63, 16),
      "b011".U -> rdata(63, 24),
      "b100".U -> rdata(63, 32),
      "b101".U -> rdata(63, 40),
      "b110".U -> rdata(63, 48),
      "b111".U -> rdata(63, 56)
    ))
    val rdataPartialLoad = rdataHelper(uop(loadWbSel(i)), rdataSel)
    io.ldout(i).bits.uop := uop(loadWbSel(i))
    io.ldout(i).bits.uop.cf.exceptionVec := dataModule.io.rdata(loadWbSel(i)).exception.asBools
    io.ldout(i).bits.uop.lqIdx := loadWbSel(i).asTypeOf(new LqPtr)
    io.ldout(i).bits.data := rdataPartialLoad
    io.ldout(i).bits.redirectValid := false.B
    io.ldout(i).bits.redirect := DontCare
    io.ldout(i).bits.brUpdate := DontCare
    io.ldout(i).bits.debug.isMMIO := dataModule.io.rdata(loadWbSel(i)).mmio
    io.ldout(i).bits.fflags := DontCare
    io.ldout(i).valid := loadWbSelVec(loadWbSel(i)) && loadWbSelV(i)
    when(io.ldout(i).fire()) {
      writebacked(loadWbSel(i)) := true.B
      XSInfo("load miss write to cdb roqidx %d lqidx %d pc 0x%x paddr %x data %x mmio %x\n",
        io.ldout(i).bits.uop.roqIdx.asUInt,
        io.ldout(i).bits.uop.lqIdx.asUInt,
        io.ldout(i).bits.uop.cf.pc,
        dataModule.io.rdata(loadWbSel(i)).paddr,
        dataModule.io.rdata(loadWbSel(i)).data,
        dataModule.io.rdata(loadWbSel(i)).mmio
      )
    }
  })

  // move deqPtr: entries can be freed out of order, so the dequeue pointer
  // jumps over deallocated entries to the first one still allocated
  // allocatedMask: positions deqPtr must stop at (still allocated, or
  // outside the current valid region)
  val allocatedMask = VecInit((0 until LoadQueueSize).map(i => allocated(i) || !enqDeqMask(i)))
  // find the first such entry starting from deqPtr
  val nextTail1 = getFirstOneWithFlag(allocatedMask, tailMask, deqPtrExt.flag)
  val nextTail = Mux(Cat(allocatedMask).orR, nextTail1, enqPtrExt)
  deqPtrExt := nextTail

  // When a load commits, mark it as !allocated so the entry can be recycled
  (0 until CommitWidth).map(i => {
    when(loadCommit(i)) {
      allocated(mcommitIdx(i)) := false.B
      XSDebug("load commit %d: idx %d %x\n", i.U, mcommitIdx(i), uop(mcommitIdx(i)).cf.pc)
    }
  })

  // rollback check
  val rollback = Wire(Vec(StorePipelineWidth, Valid(new Redirect)))

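  // getFirstOne: index of the first set bit at or above the start position;
  // if there is none, wrap around and take the overall first set bit.
  // getFirstOneWithFlag does the same but also flips the queue flag when the
  // search wraps, yielding a full circular-queue pointer.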
  def getFirstOne(mask: Vec[Bool], startMask: UInt) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    PriorityEncoder(Mux(highBitsUint.orR(), highBitsUint, mask.asUInt))
  }

  def getFirstOneWithFlag(mask: Vec[Bool], startMask: UInt, startFlag: Bool) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    val changeDirection = !highBitsUint.orR()
    val index = PriorityEncoder(Mux(!changeDirection, highBitsUint, mask.asUInt))
    LqPtr(startFlag ^ changeDirection, index)
  }

  def getOldestInTwo(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    assert(valid.length == 2)
    Mux(valid(0) && valid(1),
      Mux(isAfter(uop(0).roqIdx, uop(1).roqIdx), uop(1), uop(0)),
      Mux(valid(0) && !valid(1), uop(0), uop(1)))
  }

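  // getAfterMask: pairwise age matrix; mask(i)(j) is true when uop(i) is
  // program-order younger than uop(j). An invalid entry counts as younger
  // than everything, so it never wins the oldest-candidate selection below.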
  def getAfterMask(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    val length = valid.length
    (0 until length).map(i => {
      (0 until length).map(j => {
        Mux(valid(i) && valid(j),
          isAfter(uop(i).roqIdx, uop(j).roqIdx),
          Mux(!valid(i), true.B, false.B))
      })
    })
  }

  def rangeMask(start: LqPtr, end: LqPtr): UInt = {
    val startMask = (1.U((LoadQueueSize + 1).W) << start.value).asUInt - 1.U
    val endMask = (1.U((LoadQueueSize + 1).W) << end.value).asUInt - 1.U
    val xorMask = startMask(LoadQueueSize - 1, 0) ^ endMask(LoadQueueSize - 1, 0)
    Mux(start.flag === end.flag, xorMask, ~xorMask)
  }

  // data forwarding is not handled here; the forward query outputs are left
  // unconnected
  (0 until LoadPipelineWidth).foreach(i => {
    io.forward(i).forwardMask := DontCare
    io.forward(i).forwardData := DontCare
  })

  // store backward query and rollback
  //  val needCheck = Seq.fill(8)(WireInit(true.B))
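  // When a store computes its address, search backwards for younger loads
  // that already read the same bytes. Three sources must be checked: loads
  // sitting in the queue, loads writing back this very cycle, and loads
  // still in the pipeline (visible through the forward query ports).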
  (0 until StorePipelineWidth).foreach(i => {
    rollback(i) := DontCare

    when(io.storeIn(i).valid) {
      val startIndex = io.storeIn(i).bits.uop.lqIdx.value
      val lqIdxMask = ((1.U((LoadQueueSize + 1).W) << startIndex).asUInt - 1.U)(LoadQueueSize - 1, 0)
      val xorMask = lqIdxMask ^ headMask
      val sameFlag = io.storeIn(i).bits.uop.lqIdx.flag === enqPtrExt.flag
      val toEnqPtrMask = Mux(sameFlag, xorMask, ~xorMask)

      // check whether a load already in the lq needs to be rolled back
      val lqViolationVec = VecInit((0 until LoadQueueSize).map(j => {
        val addrMatch = allocated(j) &&
          io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === dataModule.io.rdata(j).paddr(PAddrBits - 1, 3)
        val entryNeedCheck = toEnqPtrMask(j) && addrMatch && (datavalid(j) || listening(j) || miss(j))
        // TODO: update refilled data
        val violationVec = (0 until 8).map(k => dataModule.io.rdata(j).mask(k) && io.storeIn(i).bits.mask(k))
        Cat(violationVec).orR() && entryNeedCheck
      }))
      val lqViolation = lqViolationVec.asUInt().orR()
      val lqViolationIndex = getFirstOne(lqViolationVec, lqIdxMask)
      val lqViolationUop = uop(lqViolationIndex)
      XSDebug(lqViolation, p"${Binary(Cat(lqViolationVec))}, $startIndex, $lqViolationIndex\n")

      // when a load and a store write back to the roq together, check if rollback is needed
      val wbViolationVec = VecInit((0 until LoadPipelineWidth).map(j => {
        io.loadIn(j).valid &&
          isAfter(io.loadIn(j).bits.uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
          io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.loadIn(j).bits.paddr(PAddrBits - 1, 3) &&
          (io.storeIn(i).bits.mask & io.loadIn(j).bits.mask).orR
      }))
      val wbViolation = wbViolationVec.asUInt().orR()
      val wbViolationUop = getOldestInTwo(wbViolationVec, io.loadIn.map(_.bits.uop))
      XSDebug(wbViolation, p"${Binary(Cat(wbViolationVec))}, $wbViolationUop\n")

      // check if rollback is needed for a load still in l1
      val l1ViolationVec = VecInit((0 until LoadPipelineWidth).map(j => {
        io.forward(j).valid && // l1 load valid
          isAfter(io.forward(j).uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
          io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.forward(j).paddr(PAddrBits - 1, 3) &&
          (io.storeIn(i).bits.mask & io.forward(j).mask).orR
      }))
      val l1Violation = l1ViolationVec.asUInt().orR()
      val l1ViolationUop = getOldestInTwo(l1ViolationVec, io.forward.map(_.uop))
      XSDebug(l1Violation, p"${Binary(Cat(l1ViolationVec))}, $l1ViolationUop\n")

      // pick the oldest of the three candidates using the pairwise age matrix
      val rollbackValidVec = Seq(lqViolation, wbViolation, l1Violation)
      val rollbackUopVec = Seq(lqViolationUop, wbViolationUop, l1ViolationUop)
      rollback(i).valid := Cat(rollbackValidVec).orR
      val mask = getAfterMask(rollbackValidVec, rollbackUopVec)
      val oneAfterZero = mask(1)(0)
      val rollbackUop = Mux(oneAfterZero && mask(2)(0),
        rollbackUopVec(0),
        Mux(!oneAfterZero && mask(2)(1), rollbackUopVec(1), rollbackUopVec(2)))
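      // target roqIdx - 1: the redirect flushes instructions younger than
      // roqIdx, so pointing one entry earlier makes the violating load
      // itself re-execute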
      rollback(i).bits.roqIdx := rollbackUop.roqIdx - 1.U

      rollback(i).bits.isReplay := true.B
      rollback(i).bits.isMisPred := false.B
      rollback(i).bits.isException := false.B
      rollback(i).bits.isFlushPipe := false.B

      XSDebug(
        l1Violation,
        "need rollback (l1 load) pc %x roqidx %d target %x\n",
        io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, l1ViolationUop.roqIdx.asUInt
      )
      XSDebug(
        lqViolation,
        "need rollback (ld wb before store) pc %x roqidx %d target %x\n",
        io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, lqViolationUop.roqIdx.asUInt
      )
      XSDebug(
        wbViolation,
        "need rollback (ld/st wb together) pc %x roqidx %d target %x\n",
        io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, wbViolationUop.roqIdx.asUInt
      )
    }.otherwise {
      rollback(i).valid := false.B
    }
  })

  def rollbackSel(a: Valid[Redirect], b: Valid[Redirect]): ValidIO[Redirect] = {
    Mux(
      a.valid,
      Mux(
        b.valid,
        Mux(isAfter(a.bits.roqIdx, b.bits.roqIdx), b, a), // a,b both valid, sel oldest
        a // sel a
      ),
      b // sel b
    )
  }
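  // Reduce the per-store-port rollback requests to a single redirect,
  // keeping the oldest one so recovery restarts as early as necessary.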

  io.rollback := ParallelOperation(rollback, rollbackSel)

  // Memory mapped IO / other uncached operations

  // setup misc mem access req
  // mask / paddr / data can be read from lq.data
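  // An MMIO load may have side effects, so it is only issued once it is the
  // oldest instruction in the machine: pending, at the head of the queue
  // (deqPtr), and with its uop at the head of the roq (roqDeqPtr).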
  val commitType = io.commits(0).bits.uop.ctrl.commitType
  io.uncache.req.valid := pending(deqPtr) && allocated(deqPtr) &&
    commitType === CommitType.LOAD &&
    io.roqDeqPtr === uop(deqPtr).roqIdx &&
    !io.commits(0).bits.isWalk

  io.uncache.req.bits.cmd  := MemoryOpConstants.M_XRD
  io.uncache.req.bits.addr := dataModule.io.rdata(deqPtr).paddr
  io.uncache.req.bits.data := dataModule.io.rdata(deqPtr).data
  io.uncache.req.bits.mask := dataModule.io.rdata(deqPtr).mask

  io.uncache.req.bits.meta.id       := DontCare // TODO: // FIXME
  io.uncache.req.bits.meta.vaddr    := DontCare
  io.uncache.req.bits.meta.paddr    := dataModule.io.rdata(deqPtr).paddr
  io.uncache.req.bits.meta.uop      := uop(deqPtr)
  io.uncache.req.bits.meta.mmio     := true.B // dataModule.io.rdata(deqPtr).mmio
  io.uncache.req.bits.meta.tlb_miss := false.B
  io.uncache.req.bits.meta.mask     := dataModule.io.rdata(deqPtr).mask
  io.uncache.req.bits.meta.replay   := false.B

  io.uncache.resp.ready := true.B

  when(io.uncache.req.fire()){
    pending(deqPtr) := false.B
  }

  dataModule.io.uncache.wen := false.B
  when(io.uncache.resp.fire()){
    datavalid(deqPtr) := true.B
    dataModule.io.uncacheWrite(deqPtr, io.uncache.resp.bits.data(XLEN-1, 0))
    dataModule.io.uncache.wen := true.B
    // TODO: write back exception info
  }

  when(io.uncache.req.fire()){
    XSDebug("uncache req: pc %x addr %x data %x op %x mask %x\n",
      uop(deqPtr).cf.pc,
      io.uncache.req.bits.addr,
      io.uncache.req.bits.data,
      io.uncache.req.bits.cmd,
      io.uncache.req.bits.mask
    )
  }

  when(io.uncache.resp.fire()){
    XSDebug("uncache resp: data %x\n", io.uncache.resp.bits.data)
  }

  // Read vaddr for mem exception
  io.exceptionAddr.vaddr := dataModule.io.rdata(io.exceptionAddr.lsIdx.lqIdx.value).vaddr

  // misprediction recovery / exception redirect
  // invalidate lq entries using roqIdx
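  // Two flavours of flush: a replay redirect keeps the entry allocated but
  // clears its progress bits so the load re-executes, while any other
  // redirect (mispredict / exception) deallocates the entry altogether.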
  val needCancel = Wire(Vec(LoadQueueSize, Bool()))
  for (i <- 0 until LoadQueueSize) {
    needCancel(i) := uop(i).roqIdx.needFlush(io.brqRedirect) && allocated(i) && !commited(i)
    when(needCancel(i)) {
      when(io.brqRedirect.bits.isReplay){
        datavalid(i) := false.B
        writebacked(i) := false.B
        listening(i) := false.B
        miss(i) := false.B
        pending(i) := false.B
      }.otherwise{
        allocated(i) := false.B
      }
    }
  }
  when (io.brqRedirect.valid && io.brqRedirect.bits.isMisPred) {
    enqPtrExt := enqPtrExt - PopCount(needCancel)
  }

  // assert(!io.rollback.valid)
  when(io.rollback.valid) {
    XSDebug("Mem rollback: pc %x roqidx %d\n", io.rollback.bits.pc, io.rollback.bits.roqIdx.asUInt)
  }

  // debug info
  XSDebug("head %d:%d tail %d:%d\n", enqPtrExt.flag, enqPtr, deqPtrExt.flag, deqPtr)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when(flag) {
      XSDebug(false, true.B, name)
    }.otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  for (i <- 0 until LoadQueueSize) {
    if (i % 4 == 0) XSDebug("")
    XSDebug(false, true.B, "%x [%x] ", uop(i).cf.pc, dataModule.io.rdata(i).paddr)
    PrintFlag(allocated(i), "a")
    PrintFlag(allocated(i) && datavalid(i), "v")
    PrintFlag(allocated(i) && writebacked(i), "w")
    PrintFlag(allocated(i) && commited(i), "c")
    PrintFlag(allocated(i) && miss(i), "m")
    PrintFlag(allocated(i) && listening(i), "l")
    PrintFlag(allocated(i) && pending(i), "p")
    XSDebug(false, true.B, " ")
    if (i % 4 == 3 || i == LoadQueueSize - 1) XSDebug(false, true.B, "\n")
  }

}