// xref: /XiangShan/src/main/scala/xiangshan/mem/lsqueue/LoadQueue.scala (revision fd69e556d45aa82ff35fc57c3c3945b0112adad7)
package xiangshan.mem

import chisel3._
import chisel3.util._
import freechips.rocketchip.tile.HasFPUParameters
import utils._
import xiangshan._
import xiangshan.cache._
import xiangshan.cache.{DCacheLineIO, DCacheWordIO, MemoryOpConstants, TlbRequestIO}
import xiangshan.backend.LSUOpType
import xiangshan.mem._
import xiangshan.backend.roq.RoqPtr


class LqPtr extends CircularQueuePtr(LqPtr.LoadQueueSize) { }

object LqPtr extends HasXSParameter {
  def apply(f: Bool, v: UInt): LqPtr = {
    val ptr = Wire(new LqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}
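
// Editorial sketch: LqPtr.apply builds a wrapped queue pointer from an explicit
// flag/value pair. Hypothetical usage:
//   val p = LqPtr(false.B, 3.U) // entry 3 on the current lap
//   val q = LqPtr(true.B, 3.U)  // entry 3, one wrap later
// Under the assumed CircularQueuePtr semantics the two point at the same slot,
// but they compare as different queue positions because the flag differs.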

trait HasLoadHelper { this: XSModule =>
  def rdataHelper(uop: MicroOp, rdata: UInt): UInt = {
    val fpWen = uop.ctrl.fpWen
    LookupTree(uop.ctrl.fuOpType, List(
      LSUOpType.lb   -> SignExt(rdata(7, 0) , XLEN),
      LSUOpType.lh   -> SignExt(rdata(15, 0), XLEN),
      LSUOpType.lw   -> Mux(fpWen, rdata, SignExt(rdata(31, 0), XLEN)),
      LSUOpType.ld   -> Mux(fpWen, rdata, SignExt(rdata(63, 0), XLEN)),
      LSUOpType.lbu  -> ZeroExt(rdata(7, 0) , XLEN),
      LSUOpType.lhu  -> ZeroExt(rdata(15, 0), XLEN),
      LSUOpType.lwu  -> ZeroExt(rdata(31, 0), XLEN),
    ))
  }

  def fpRdataHelper(uop: MicroOp, rdata: UInt): UInt = {
    LookupTree(uop.ctrl.fuOpType, List(
      LSUOpType.lw   -> recode(rdata(31, 0), S),
      LSUOpType.ld   -> recode(rdata(63, 0), D)
    ))
  }
}
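
// Editorial sketch: rdataHelper applies the RISC-V load extension rules after
// byte alignment. A worked example under assumed inputs: for fuOpType ==
// LSUOpType.lb with an aligned byte of 0x80, the result is
// SignExt(0x80, XLEN) = 0xffff_ffff_ffff_ff80, while LSUOpType.lbu gives
// ZeroExt(0x80, XLEN) = 0x0000_0000_0000_0080. For fp loads (fpWen), lw/ld
// pass the raw 64-bit data through unchanged.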

class LqEnqIO extends XSBundle {
  val canAccept = Output(Bool())
  val sqCanAccept = Input(Bool())
  val needAlloc = Vec(RenameWidth, Input(Bool()))
  val req = Vec(RenameWidth, Flipped(ValidIO(new MicroOp)))
  val resp = Vec(RenameWidth, Output(new LqPtr))
}

// Load Queue
class LoadQueue extends XSModule
  with HasDCacheParameters
  with HasCircularQueuePtrHelper
  with HasLoadHelper
{
  val io = IO(new Bundle() {
    val enq = new LqEnqIO
    val brqRedirect = Input(Valid(new Redirect))
    val loadIn = Vec(LoadPipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle))) // FIXME: Valid() only
    val ldout = Vec(2, DecoupledIO(new ExuOutput)) // writeback int load
    val load_s1 = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO))
    val commits = Flipped(new RoqCommitIO)
    val rollback = Output(Valid(new Redirect)) // replay now starts from load instead of store
    val dcache = new DCacheLineIO
    val uncache = new DCacheWordIO
    val roqDeqPtr = Input(new RoqPtr)
    val exceptionAddr = new ExceptionAddrIO
  })

  val uop = Reg(Vec(LoadQueueSize, new MicroOp))
  // val data = Reg(Vec(LoadQueueSize, new LsRoqEntry))
  val dataModule = Module(new LSQueueData(LoadQueueSize, LoadPipelineWidth))
  dataModule.io := DontCare
  val allocated = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // lq entry has been allocated
  val datavalid = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // data is valid
  val writebacked = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // inst has been written back to CDB
  val commited = Reg(Vec(LoadQueueSize, Bool())) // inst has been committed by ROQ
  val miss = Reg(Vec(LoadQueueSize, Bool())) // load inst missed, waiting for miss queue to accept miss request
  val listening = Reg(Vec(LoadQueueSize, Bool())) // waiting for refill result
  val pending = Reg(Vec(LoadQueueSize, Bool())) // mmio pending: inst is an mmio inst, it will not be executed until it reaches the end of roq

  val debug_mmio = Reg(Vec(LoadQueueSize, Bool())) // mmio: inst is an mmio inst

  val enqPtrExt = RegInit(VecInit((0 until RenameWidth).map(_.U.asTypeOf(new LqPtr))))
  val deqPtrExt = RegInit(0.U.asTypeOf(new LqPtr))
  val validCounter = RegInit(0.U(log2Ceil(LoadQueueSize + 1).W))
  val allowEnqueue = RegInit(true.B)

  val enqPtr = enqPtrExt(0).value
  val deqPtr = deqPtrExt.value
  val sameFlag = enqPtrExt(0).flag === deqPtrExt.flag
  val isEmpty = enqPtr === deqPtr && sameFlag
  val isFull = enqPtr === deqPtr && !sameFlag
  val allowIn = !isFull

  val loadCommit = (0 until CommitWidth).map(i => io.commits.valid(i) && !io.commits.isWalk && io.commits.info(i).commitType === CommitType.LOAD)
  val mcommitIdx = (0 until CommitWidth).map(i => io.commits.info(i).lqIdx.value)

  val deqMask = UIntToMask(deqPtr, LoadQueueSize)
  val enqMask = UIntToMask(enqPtr, LoadQueueSize)
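
  // Editorial sketch: UIntToMask(ptr, size) is assumed to produce a prefix mask
  // with bits [0, ptr) set, e.g. UIntToMask(3.U, 8) === "b00000111".U. deqMask
  // and enqMask are combined later (see xorMask in detectRollback) to test
  // whether an entry index lies between two wrapped pointers.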

  /**
    * Enqueue at dispatch
    *
    * Currently, LoadQueue only allows enqueue when the number of empty entries
    * is greater than RenameWidth (the enqueue width).
    */
  io.enq.canAccept := allowEnqueue

  for (i <- 0 until RenameWidth) {
    val offset = if (i == 0) 0.U else PopCount(io.enq.needAlloc.take(i))
    val lqIdx = enqPtrExt(offset)
    val index = lqIdx.value
    when (io.enq.req(i).valid && io.enq.canAccept && io.enq.sqCanAccept && !io.brqRedirect.valid) {
      uop(index) := io.enq.req(i).bits
      allocated(index) := true.B
      datavalid(index) := false.B
      writebacked(index) := false.B
      commited(index) := false.B
      miss(index) := false.B
      listening(index) := false.B
      pending(index) := false.B
    }
    io.enq.resp(i) := lqIdx
  }
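
  // Editorial sketch: offset counts how many earlier dispatch slots need an LQ
  // entry. With a hypothetical needAlloc = (1, 0, 1, 1) and RenameWidth = 4,
  // the offsets are 0, 1, 1, 2, so the three loads claim enqPtrExt(0),
  // enqPtrExt(1) and enqPtrExt(2) while the non-load slot consumes no entry.
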
  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n")

  /**
    * Writeback load from load units
    *
    * Most load instructions write back to the regfile at the same time they are
    * written into the load queue. However:
    *   (1) For an mmio instruction with exceptions, it writes back to the ROB immediately.
    *   (2) For an mmio instruction without exceptions, it does not write back here.
    *       The mmio instruction is sent to the lower memory level when it reaches the head of the ROB.
    *       After the uncache response arrives, it writes back through the arbiter shared with loadUnit.
    *   (3) For a cache miss, the entry is marked as miss and the request is sent to dcache later.
    *       After the cache refill, it writes back through the arbiter shared with loadUnit.
    */
  for (i <- 0 until LoadPipelineWidth) {
    dataModule.io.wb(i).wen := false.B
    when(io.loadIn(i).fire()) {
      when(io.loadIn(i).bits.miss) {
        XSInfo(io.loadIn(i).valid, "load miss write to lq idx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x roll %x exc %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio,
          io.loadIn(i).bits.rollback,
          io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
        )
      }.otherwise {
        XSInfo(io.loadIn(i).valid, "load hit write to cdb lqidx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x roll %x exc %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio,
          io.loadIn(i).bits.rollback,
          io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
        )
      }
      val loadWbIndex = io.loadIn(i).bits.uop.lqIdx.value
      datavalid(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      writebacked(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio

      val loadWbData = Wire(new LsqEntry)
      loadWbData.paddr := io.loadIn(i).bits.paddr
      loadWbData.vaddr := io.loadIn(i).bits.vaddr
      loadWbData.mask := io.loadIn(i).bits.mask
      loadWbData.data := io.loadIn(i).bits.data // for mmio / misc / debug
      loadWbData.fwdMask := io.loadIn(i).bits.forwardMask
      loadWbData.fwdData := io.loadIn(i).bits.forwardData
      loadWbData.exception := io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
      dataModule.io.wbWrite(i, loadWbIndex, loadWbData)
      dataModule.io.wb(i).wen := true.B

      debug_mmio(loadWbIndex) := io.loadIn(i).bits.mmio

      val dcacheMissed = io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      miss(loadWbIndex) := dcacheMissed && !io.loadIn(i).bits.uop.cf.exceptionVec.asUInt.orR
      listening(loadWbIndex) := dcacheMissed
      pending(loadWbIndex) := io.loadIn(i).bits.mmio && !io.loadIn(i).bits.uop.cf.exceptionVec.asUInt.orR
      uop(loadWbIndex).debugInfo.issueTime := io.loadIn(i).bits.uop.debugInfo.issueTime
    }
  }

  /**
    * Cache miss request
    *
    * (1) writeback: miss
    * (2) send to dcache: listening
    * (3) dcache response: datavalid
    * (4) writeback to ROB: writeback
    */
  val inflightReqs = RegInit(VecInit(Seq.fill(cfg.nLoadMissEntries)(0.U.asTypeOf(new InflightBlockInfo))))
  val inflightReqFull = inflightReqs.map(req => req.valid).reduce(_&&_)
  val reqBlockIndex = PriorityEncoder(~VecInit(inflightReqs.map(req => req.valid)).asUInt)

  val missRefillSelVec = VecInit(
    (0 until LoadQueueSize).map{ i =>
      val inflight = inflightReqs.map(req => req.valid && req.block_addr === get_block_addr(dataModule.io.rdata(i).paddr)).reduce(_||_)
      allocated(i) && miss(i) && !inflight
    })
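
  // Editorial sketch: missRefillSelVec deduplicates misses at cache-line
  // granularity. If two queued loads (hypothetically entries 5 and 9) miss on
  // the same block, only one request is issued; once the block address is
  // recorded in inflightReqs, the other entry is filtered out by !inflight,
  // and both are woken up by the same refill broadcast below.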

  val missRefillSel = getFirstOne(missRefillSelVec, deqMask)
  val missRefillBlockAddr = get_block_addr(dataModule.io.rdata(missRefillSel).paddr)
  io.dcache.req.valid := missRefillSelVec.asUInt.orR
  io.dcache.req.bits.cmd := MemoryOpConstants.M_XRD
  io.dcache.req.bits.addr := missRefillBlockAddr
  io.dcache.req.bits.data := DontCare
  io.dcache.req.bits.mask := DontCare

  io.dcache.req.bits.meta.id       := DontCare
  io.dcache.req.bits.meta.vaddr    := DontCare // dataModule.io.rdata(missRefillSel).vaddr
  io.dcache.req.bits.meta.paddr    := missRefillBlockAddr
  io.dcache.req.bits.meta.uop      := uop(missRefillSel)
  io.dcache.req.bits.meta.mmio     := false.B // mmio(missRefillSel)
  io.dcache.req.bits.meta.tlb_miss := false.B
  io.dcache.req.bits.meta.mask     := DontCare
  io.dcache.req.bits.meta.replay   := false.B

  io.dcache.resp.ready := true.B

  assert(!(debug_mmio(missRefillSel) && io.dcache.req.valid))

  when(io.dcache.req.fire()) {
    miss(missRefillSel) := false.B
    listening(missRefillSel) := true.B

    // mark this block as inflight
    inflightReqs(reqBlockIndex).valid := true.B
    inflightReqs(reqBlockIndex).block_addr := missRefillBlockAddr
    // note: the assert samples the pre-update register value, so it checks
    // that the selected inflight slot was actually free
    assert(!inflightReqs(reqBlockIndex).valid)
  }

  when(io.dcache.resp.fire()) {
    val inflight = inflightReqs.map(req => req.valid && req.block_addr === get_block_addr(io.dcache.resp.bits.meta.paddr)).reduce(_||_)
    assert(inflight)
    for (i <- 0 until cfg.nLoadMissEntries) {
      when (inflightReqs(i).valid && inflightReqs(i).block_addr === get_block_addr(io.dcache.resp.bits.meta.paddr)) {
        inflightReqs(i).valid := false.B
      }
    }
  }

  when(io.dcache.req.fire()){
    XSDebug("miss req: pc:0x%x roqIdx:%d lqIdx:%d (p)addr:0x%x vaddr:0x%x\n",
      io.dcache.req.bits.meta.uop.cf.pc, io.dcache.req.bits.meta.uop.roqIdx.asUInt, io.dcache.req.bits.meta.uop.lqIdx.asUInt,
      io.dcache.req.bits.addr, io.dcache.req.bits.meta.vaddr
    )
  }

  when(io.dcache.resp.fire()){
    XSDebug("miss resp: pc:0x%x roqIdx:%d lqIdx:%d (p)addr:0x%x data %x\n",
      io.dcache.resp.bits.meta.uop.cf.pc, io.dcache.resp.bits.meta.uop.roqIdx.asUInt, io.dcache.resp.bits.meta.uop.lqIdx.asUInt,
      io.dcache.resp.bits.meta.paddr, io.dcache.resp.bits.data
    )
  }

  // Refill 64 bits in a cycle
  // Refill data comes back from io.dcache.resp
  dataModule.io.refill.dcache := io.dcache.resp.bits

  (0 until LoadQueueSize).map(i => {
    val blockMatch = get_block_addr(dataModule.io.rdata(i).paddr) === io.dcache.resp.bits.meta.paddr
    dataModule.io.refill.wen(i) := false.B
    when(allocated(i) && listening(i) && blockMatch && io.dcache.resp.fire()) {
      dataModule.io.refill.wen(i) := true.B
      datavalid(i) := true.B
      listening(i) := false.B
    }
  })
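
  // Editorial sketch: the refill is broadcast rather than indexed. Every
  // allocated entry in the listening state compares its block address with the
  // response, so all entries waiting on the refilled line (possibly several)
  // become datavalid in the same cycle, matching the request deduplication via
  // inflightReqs above.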

  // writeback up to 2 missed load insts to CDB
  // just randomly pick 2 missed loads (data refilled) and write them back to the CDB
  val loadWbSelVec = VecInit((0 until LoadQueueSize).map(i => {
    allocated(i) && datavalid(i) && !writebacked(i)
  })).asUInt() // use a UInt instead of a Vec to reduce generated Verilog lines
  // note: the writeback width below reuses StorePipelineWidth; it is assumed
  // to match the width of io.ldout (2)
  val loadWbSel = Wire(Vec(StorePipelineWidth, UInt(log2Up(LoadQueueSize).W)))
  val loadWbSelV = Wire(Vec(StorePipelineWidth, Bool()))
  val loadEvenSelVec = VecInit((0 until LoadQueueSize/2).map(i => {loadWbSelVec(2*i)}))
  val loadOddSelVec = VecInit((0 until LoadQueueSize/2).map(i => {loadWbSelVec(2*i+1)}))
  val evenDeqMask = VecInit((0 until LoadQueueSize/2).map(i => {deqMask(2*i)})).asUInt
  val oddDeqMask = VecInit((0 until LoadQueueSize/2).map(i => {deqMask(2*i+1)})).asUInt
  loadWbSel(0) := Cat(getFirstOne(loadEvenSelVec, evenDeqMask), 0.U(1.W))
  loadWbSelV(0) := loadEvenSelVec.asUInt.orR
  loadWbSel(1) := Cat(getFirstOne(loadOddSelVec, oddDeqMask), 1.U(1.W))
  loadWbSelV(1) := loadOddSelVec.asUInt.orR
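
  // Editorial sketch of the selection scheme: entries are split by index
  // parity so the two writeback ports can never pick the same entry. Port 0
  // takes the first ready even entry (index = Cat(evenSel, 0)), port 1 the
  // first ready odd entry (index = Cat(oddSel, 1)); getFirstOne starts the
  // search at the dequeue pointer, so older loads are preferred.
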
  (0 until StorePipelineWidth).map(i => {
    // data select
    val rdata = dataModule.io.rdata(loadWbSel(i)).data
    val seluop = uop(loadWbSel(i))
    val func = seluop.ctrl.fuOpType
    val raddr = dataModule.io.rdata(loadWbSel(i)).paddr
    val rdataSel = LookupTree(raddr(2, 0), List(
      "b000".U -> rdata(63, 0),
      "b001".U -> rdata(63, 8),
      "b010".U -> rdata(63, 16),
      "b011".U -> rdata(63, 24),
      "b100".U -> rdata(63, 32),
      "b101".U -> rdata(63, 40),
      "b110".U -> rdata(63, 48),
      "b111".U -> rdata(63, 56)
    ))
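    // Editorial sketch: this LookupTree right-shifts the 64-bit doubleword by
    // 8 * raddr(2, 0) bits so the addressed byte lands at bit 0. For a
    // hypothetical lh at a paddr ending in b110, the selected slice is
    // rdata(63, 48), and rdataHelper below sign-extends its low 16 bits.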
    val rdataPartialLoad = rdataHelper(seluop, rdataSel)

    val validWb = loadWbSelVec(loadWbSel(i)) && loadWbSelV(i)

    // writeback missed int/fp load
    //
    // Int load writeback will finish (if not blocked) in one cycle
    io.ldout(i).bits.uop := seluop
    io.ldout(i).bits.uop.cf.exceptionVec := dataModule.io.rdata(loadWbSel(i)).exception.asBools
    io.ldout(i).bits.uop.lqIdx := loadWbSel(i).asTypeOf(new LqPtr)
    io.ldout(i).bits.data := rdataPartialLoad
    io.ldout(i).bits.redirectValid := false.B
    io.ldout(i).bits.redirect := DontCare
    io.ldout(i).bits.brUpdate := DontCare
    io.ldout(i).bits.debug.isMMIO := debug_mmio(loadWbSel(i))
    io.ldout(i).bits.fflags := DontCare
    io.ldout(i).valid := validWb

    when(io.ldout(i).fire()) {
      writebacked(loadWbSel(i)) := true.B

      XSInfo("int load miss write to cdb roqidx %d lqidx %d pc 0x%x paddr %x data %x mmio %x\n",
        io.ldout(i).bits.uop.roqIdx.asUInt,
        io.ldout(i).bits.uop.lqIdx.asUInt,
        io.ldout(i).bits.uop.cf.pc,
        dataModule.io.rdata(loadWbSel(i)).paddr,
        dataModule.io.rdata(loadWbSel(i)).data,
        debug_mmio(loadWbSel(i))
      )
    }
  })

  /**
    * Load commits
    *
    * When a load commits, mark it as !allocated and move deqPtrExt forward.
    */
  (0 until CommitWidth).map(i => {
    when(loadCommit(i)) {
      allocated(mcommitIdx(i)) := false.B
      XSDebug("load commit %d: idx %d %x\n", i.U, mcommitIdx(i), uop(mcommitIdx(i)).cf.pc)
    }
  })

  def getFirstOne(mask: Vec[Bool], startMask: UInt) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    PriorityEncoder(Mux(highBitsUint.orR(), highBitsUint, mask.asUInt))
  }
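
  // Editorial sketch: getFirstOne is a rotated priority encoder. startMask has
  // bits [0, start) set, so candidates at or above the start pointer (highBits)
  // win first and the search wraps to the low part only if none is set. Worked
  // example with hypothetical values: mask = b0101, startMask = b0011 (start
  // position 2) gives highBits = b0100, so index 2 is chosen instead of index 0.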

  def getOldestInTwo(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    assert(valid.length == 2)
    Mux(valid(0) && valid(1),
      Mux(isAfter(uop(0).roqIdx, uop(1).roqIdx), uop(1), uop(0)),
      Mux(valid(0) && !valid(1), uop(0), uop(1)))
  }

  def getAfterMask(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    val length = valid.length
    (0 until length).map(i => {
      (0 until length).map(j => {
        Mux(valid(i) && valid(j),
          isAfter(uop(i).roqIdx, uop(j).roqIdx),
          Mux(!valid(i), true.B, false.B))
      })
    })
  }
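
  // Editorial sketch: getAfterMask builds a pairwise age matrix in which
  // mask(i)(j) means "candidate i is younger than (after) candidate j"; an
  // invalid candidate is treated as younger than everything so it never wins
  // the oldest-selection. With three candidates this produces the mask(1)(0) /
  // mask(2)(0) / mask(2)(1) terms used below to pick rollbackUop.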

  /**
    * Memory violation detection
    *
    * When a store writes back, it searches the LoadQueue for younger loads with
    * the same physical address; such loads read stale data and must be re-executed.
    *
    * Cycle 0: Store Writeback
    *   Generate the match vector for the store address with rangeMask(stPtr, enqPtr).
    *   Besides, load instructions in LoadUnit_S1 and S2 are also checked.
    * Cycle 1: Redirect Generation
    *   There are three possible types of violations; choose the oldest load among them.
    *   Set io.rollback according to the detected violation.
    */
  io.load_s1 := DontCare
  def detectRollback(i: Int) = {
    val startIndex = io.storeIn(i).bits.uop.lqIdx.value
    val lqIdxMask = UIntToMask(startIndex, LoadQueueSize)
    val xorMask = lqIdxMask ^ enqMask
    val sameFlag = io.storeIn(i).bits.uop.lqIdx.flag === enqPtrExt(0).flag
    val toEnqPtrMask = Mux(sameFlag, xorMask, ~xorMask)
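
    // Editorial sketch of the range-mask trick: for prefix masks, lqIdxMask ^
    // enqMask has exactly the bits [startIndex, enqPtr) set when both pointers
    // are on the same lap, and the complement covers the wrapped case.
    // Hypothetical 8-entry example with startIndex = 2, enqPtr = 5, same flag:
    // b00000011 ^ b00011111 = b00011100, i.e. entries 2..4 are the loads
    // allocated after this store.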

    // check if a load already in the LQ needs to be rolled back
    val lqViolationVec = RegNext(VecInit((0 until LoadQueueSize).map(j => {
      val addrMatch = allocated(j) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === dataModule.io.rdata(j).paddr(PAddrBits - 1, 3)
      val entryNeedCheck = toEnqPtrMask(j) && addrMatch && (datavalid(j) || listening(j) || miss(j))
      // TODO: update refilled data
      val violationVec = (0 until 8).map(k => dataModule.io.rdata(j).mask(k) && io.storeIn(i).bits.mask(k))
      Cat(violationVec).orR() && entryNeedCheck
    })))
    val lqViolation = lqViolationVec.asUInt().orR()
    val lqViolationIndex = getFirstOne(lqViolationVec, RegNext(lqIdxMask))
    val lqViolationUop = uop(lqViolationIndex)
    // lqViolationUop.lqIdx.flag := deqMask(lqViolationIndex) ^ deqPtrExt.flag
    // lqViolationUop.lqIdx.value := lqViolationIndex
    XSDebug(lqViolation, p"${Binary(Cat(lqViolationVec))}, $startIndex, $lqViolationIndex\n")

    // when load and store write back to the ROQ in the same cycle, check if rollback is needed
    val wbViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.loadIn(j).valid &&
        isAfter(io.loadIn(j).bits.uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.loadIn(j).bits.paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.loadIn(j).bits.mask).orR
    })))
    val wbViolation = wbViolationVec.asUInt().orR()
    val wbViolationUop = getOldestInTwo(wbViolationVec, RegNext(VecInit(io.loadIn.map(_.bits.uop))))
    XSDebug(wbViolation, p"${Binary(Cat(wbViolationVec))}, $wbViolationUop\n")

    // check if rollback is needed for a load currently in pipeline stage 1 (l1)
    val l1ViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.load_s1(j).valid && // L1 valid
        isAfter(io.load_s1(j).uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.load_s1(j).paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.load_s1(j).mask).orR
    })))
    val l1Violation = l1ViolationVec.asUInt().orR()
    val l1ViolationUop = getOldestInTwo(l1ViolationVec, RegNext(VecInit(io.load_s1.map(_.uop))))
    XSDebug(l1Violation, p"${Binary(Cat(l1ViolationVec))}, $l1ViolationUop\n")

    val rollbackValidVec = Seq(lqViolation, wbViolation, l1Violation)
    val rollbackUopVec = Seq(lqViolationUop, wbViolationUop, l1ViolationUop)

    val mask = getAfterMask(rollbackValidVec, rollbackUopVec)
    val oneAfterZero = mask(1)(0)
    val rollbackUop = Mux(oneAfterZero && mask(2)(0),
      rollbackUopVec(0),
      Mux(!oneAfterZero && mask(2)(1), rollbackUopVec(1), rollbackUopVec(2)))

    XSDebug(
      l1Violation,
      "need rollback (l1 load) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, l1ViolationUop.roqIdx.asUInt
    )
    XSDebug(
      lqViolation,
      "need rollback (ld wb before store) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, lqViolationUop.roqIdx.asUInt
    )
    XSDebug(
      wbViolation,
      "need rollback (ld/st wb together) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, wbViolationUop.roqIdx.asUInt
    )

    (RegNext(io.storeIn(i).valid) && Cat(rollbackValidVec).orR, rollbackUop)
  }

  // rollback check
  val rollback = Wire(Vec(StorePipelineWidth, Valid(new MicroOp)))
  for (i <- 0 until StorePipelineWidth) {
    val detectedRollback = detectRollback(i)
    rollback(i).valid := detectedRollback._1
    rollback(i).bits := detectedRollback._2
  }

  def rollbackSel(a: Valid[MicroOp], b: Valid[MicroOp]): ValidIO[MicroOp] = {
    Mux(
      a.valid,
      Mux(
        b.valid,
        Mux(isAfter(a.bits.roqIdx, b.bits.roqIdx), b, a), // a, b both valid, sel oldest
        a // sel a
      ),
      b // sel b
    )
  }
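
  // Editorial sketch: rollbackSel behaves as an "older of two" operator (an
  // invalid input loses to a valid one), which is what lets ParallelOperation
  // reduce the per-store-port candidates as a balanced tree. When both inputs
  // are invalid the result is b with valid = false, which is harmless because
  // io.rollback.valid is gated on rollbackSelected.valid below.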

  val rollbackSelected = ParallelOperation(rollback, rollbackSel)
  val lastCycleRedirect = RegNext(io.brqRedirect)

  // Note that we use roqIdx - 1.U to flush the load instruction itself.
  // Thus, if the last cycle's roqIdx equals this cycle's roqIdx, it still triggers the redirect.
  io.rollback.valid := rollbackSelected.valid &&
    (!lastCycleRedirect.valid || !isAfter(rollbackSelected.bits.roqIdx, lastCycleRedirect.bits.roqIdx)) &&
    !(lastCycleRedirect.valid && lastCycleRedirect.bits.isUnconditional())

  io.rollback.bits.roqIdx := rollbackSelected.bits.roqIdx
  io.rollback.bits.level := RedirectLevel.flush
  io.rollback.bits.interrupt := DontCare
  io.rollback.bits.pc := DontCare
  io.rollback.bits.target := rollbackSelected.bits.cf.pc
  io.rollback.bits.brTag := rollbackSelected.bits.brTag

  when(io.rollback.valid) {
    XSDebug("Mem rollback: pc %x roqidx %d\n", io.rollback.bits.pc, io.rollback.bits.roqIdx.asUInt)
  }

  /**
    * Memory mapped IO / other uncached operations
    */
  io.uncache.req.valid := pending(deqPtr) && allocated(deqPtr) &&
    io.commits.info(0).commitType === CommitType.LOAD &&
    io.roqDeqPtr === uop(deqPtr).roqIdx &&
    !io.commits.isWalk

  io.uncache.req.bits.cmd  := MemoryOpConstants.M_XRD
  io.uncache.req.bits.addr := dataModule.io.rdata(deqPtr).paddr
  io.uncache.req.bits.data := dataModule.io.rdata(deqPtr).data
  io.uncache.req.bits.mask := dataModule.io.rdata(deqPtr).mask

  io.uncache.req.bits.meta.id       := DontCare
  io.uncache.req.bits.meta.vaddr    := DontCare
  io.uncache.req.bits.meta.paddr    := dataModule.io.rdata(deqPtr).paddr
  io.uncache.req.bits.meta.uop      := uop(deqPtr)
  io.uncache.req.bits.meta.mmio     := true.B
  io.uncache.req.bits.meta.tlb_miss := false.B
  io.uncache.req.bits.meta.mask     := dataModule.io.rdata(deqPtr).mask
  io.uncache.req.bits.meta.replay   := false.B

  io.uncache.resp.ready := true.B

  when (io.uncache.req.fire()) {
    pending(deqPtr) := false.B

    XSDebug("uncache req: pc %x addr %x data %x op %x mask %x\n",
      uop(deqPtr).cf.pc,
      io.uncache.req.bits.addr,
      io.uncache.req.bits.data,
      io.uncache.req.bits.cmd,
      io.uncache.req.bits.mask
    )
  }

  dataModule.io.uncache.wen := false.B
  when(io.uncache.resp.fire()){
    datavalid(deqPtr) := true.B
    dataModule.io.uncacheWrite(deqPtr, io.uncache.resp.bits.data(XLEN-1, 0))
    dataModule.io.uncache.wen := true.B

    XSDebug("uncache resp: data %x\n", io.uncache.resp.bits.data)
  }

  // Read vaddr for mem exception
  io.exceptionAddr.vaddr := dataModule.io.rdata(io.exceptionAddr.lsIdx.lqIdx.value).vaddr

  // misprediction recovery / exception redirect
  // invalidate LQ entries using roqIdx
  val needCancel = Wire(Vec(LoadQueueSize, Bool()))
  for (i <- 0 until LoadQueueSize) {
    needCancel(i) := uop(i).roqIdx.needFlush(io.brqRedirect) && allocated(i) && !commited(i)
    when (needCancel(i)) {
      allocated(i) := false.B
    }
  }

  /**
    * update pointers
    */
  val lastCycleCancelCount = PopCount(RegNext(needCancel))
  // when io.brqRedirect.valid, we don't allow enqueue even though it may fire.
  val enqNumber = Mux(io.enq.canAccept && io.enq.sqCanAccept && !io.brqRedirect.valid, PopCount(io.enq.req.map(_.valid)), 0.U)
  when (lastCycleRedirect.valid) {
    // we recover the pointers in the next cycle after redirect
    enqPtrExt := VecInit(enqPtrExt.map(_ - lastCycleCancelCount))
  }.otherwise {
    enqPtrExt := VecInit(enqPtrExt.map(_ + enqNumber))
  }

  val commitCount = PopCount(loadCommit)
  deqPtrExt := deqPtrExt + commitCount

  val lastLastCycleRedirect = RegNext(lastCycleRedirect.valid)
  val trueValidCounter = distanceBetween(enqPtrExt(0), deqPtrExt)
  validCounter := Mux(lastLastCycleRedirect,
    trueValidCounter,
    validCounter + enqNumber - commitCount
  )

  allowEnqueue := Mux(io.brqRedirect.valid,
    false.B,
    Mux(lastLastCycleRedirect,
      trueValidCounter <= (LoadQueueSize - RenameWidth).U,
      validCounter + enqNumber <= (LoadQueueSize - RenameWidth).U
    )
  )
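
  // Editorial sketch: allowEnqueue reserves at least RenameWidth free entries,
  // so a full rename group can always be accepted in the next cycle without a
  // per-slot overflow check. After a redirect, the incrementally maintained
  // validCounter is stale while cancelled entries drain, so the logic switches
  // to trueValidCounter (recomputed from the recovered pointers) one cycle
  // after the pointer recovery, i.e. when lastLastCycleRedirect is set.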

  // debug info
  XSDebug("enqPtrExt %d:%d deqPtrExt %d:%d\n", enqPtrExt(0).flag, enqPtr, deqPtrExt.flag, deqPtr)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when(flag) {
      XSDebug(false, true.B, name)
    }.otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  for (i <- 0 until LoadQueueSize) {
    if (i % 4 == 0) XSDebug("")
    XSDebug(false, true.B, "%x [%x] ", uop(i).cf.pc, dataModule.io.rdata(i).paddr)
    PrintFlag(allocated(i), "a")
    PrintFlag(allocated(i) && datavalid(i), "v")
    PrintFlag(allocated(i) && writebacked(i), "w")
    PrintFlag(allocated(i) && commited(i), "c")
    PrintFlag(allocated(i) && miss(i), "m")
    PrintFlag(allocated(i) && listening(i), "l")
    PrintFlag(allocated(i) && pending(i), "p")
    XSDebug(false, true.B, " ")
    if (i % 4 == 3 || i == LoadQueueSize - 1) XSDebug(false, true.B, "\n")
  }

}