xref: /XiangShan/src/main/scala/xiangshan/mem/lsqueue/LoadQueue.scala (revision 1d975383c1e1ca43d55d67ea6f5bc60cb8a16fdb)
package xiangshan.mem

import chisel3._
import chisel3.util._
import utils._
import xiangshan._
import xiangshan.cache._
import xiangshan.cache.{DCacheWordIO, DCacheLineIO, TlbRequestIO, MemoryOpConstants}
import xiangshan.backend.LSUOpType
import xiangshan.mem._
import xiangshan.backend.roq.RoqPtr
import xiangshan.backend.fu.fpu.boxF32ToF64


class LqPtr extends CircularQueuePtr(LqPtr.LoadQueueSize) { }

object LqPtr extends HasXSParameter {
  def apply(f: Bool, v: UInt): LqPtr = {
    val ptr = Wire(new LqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}
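
// A LqPtr is a circular-queue pointer: `value` indexes the queue and `flag`
// toggles on every wrap-around. When two pointers carry equal values, the
// flags disambiguate: same flag means a distance of 0 (empty), different
// flags mean a distance of LoadQueueSize (full); see isEmpty / isFull in
// LoadQueue below.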

class LqEnqIO extends XSBundle {
  val canAccept = Output(Bool())
  val sqCanAccept = Input(Bool())
  val needAlloc = Vec(RenameWidth, Input(Bool()))
  val req = Vec(RenameWidth, Flipped(ValidIO(new MicroOp)))
  val resp = Vec(RenameWidth, Output(new LqPtr))
}

// Load Queue
class LoadQueue extends XSModule with HasDCacheParameters with HasCircularQueuePtrHelper {
  val io = IO(new Bundle() {
    val enq = new LqEnqIO
    val brqRedirect = Input(Valid(new Redirect))
    val loadIn = Vec(LoadPipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle))) // FIXME: Valid() only
    val ldout = Vec(2, DecoupledIO(new ExuOutput)) // writeback load
    val load_s1 = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO))
    val commits = Flipped(new RoqCommitIO)
    val rollback = Output(Valid(new Redirect)) // replay now starts from load instead of store
    val dcache = new DCacheLineIO
    val uncache = new DCacheWordIO
    val roqDeqPtr = Input(new RoqPtr)
    val exceptionAddr = new ExceptionAddrIO
  })

  val uop = Reg(Vec(LoadQueueSize, new MicroOp))
  // val data = Reg(Vec(LoadQueueSize, new LsRoqEntry))
  val dataModule = Module(new LSQueueData(LoadQueueSize, LoadPipelineWidth))
  dataModule.io := DontCare
  val allocated = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // lq entry has been allocated
  val datavalid = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // data is valid
  val writebacked = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // inst has been written back to CDB
  val commited = Reg(Vec(LoadQueueSize, Bool())) // inst has been committed by roq
  val miss = Reg(Vec(LoadQueueSize, Bool())) // load inst missed, waiting for miss queue to accept miss request
  val listening = Reg(Vec(LoadQueueSize, Bool())) // waiting for refill result
  val pending = Reg(Vec(LoadQueueSize, Bool())) // mmio pending: inst is an mmio inst, it will not be executed until it reaches the head of roq

  val enqPtrExt = RegInit(VecInit((0 until RenameWidth).map(_.U.asTypeOf(new LqPtr))))
  val deqPtrExt = RegInit(0.U.asTypeOf(new LqPtr))
  val validCounter = RegInit(0.U(log2Ceil(LoadQueueSize + 1).W))
  val allowEnqueue = RegInit(true.B)

  val enqPtr = enqPtrExt(0).value
  val deqPtr = deqPtrExt.value
  val sameFlag = enqPtrExt(0).flag === deqPtrExt.flag
  val isEmpty = enqPtr === deqPtr && sameFlag
  val isFull = enqPtr === deqPtr && !sameFlag
  val allowIn = !isFull

  val loadCommit = (0 until CommitWidth).map(i => io.commits.valid(i) && !io.commits.isWalk && io.commits.info(i).commitType === CommitType.LOAD)
  val mcommitIdx = (0 until CommitWidth).map(i => io.commits.info(i).lqIdx.value)

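  // UIntToMask(ptr, size) (from utils) returns a size-bit mask whose bits
  // [0, ptr) are set, e.g. ptr = 3 with size 8 gives 0b00000111. These masks
  // turn circular pointer ranges into flat bit-vector operations below.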
  val deqMask = UIntToMask(deqPtr, LoadQueueSize)
  val enqMask = UIntToMask(enqPtr, LoadQueueSize)

  /**
    * Enqueue at dispatch
    *
    * Currently, LoadQueue only allows enqueue when #emptyEntries > RenameWidth (EnqWidth)
    */
  io.enq.canAccept := allowEnqueue

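  // Dispatch is compressed: `offset` counts how many earlier slots in the
  // same rename group also allocate an lq entry, so slot i claims entry
  // enqPtrExt(offset). A new entry starts with every status bit cleared and
  // is filled in when the load later writes back from the load units.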
  for (i <- 0 until RenameWidth) {
    val offset = if (i == 0) 0.U else PopCount(io.enq.needAlloc.take(i))
    val lqIdx = enqPtrExt(offset)
    val index = lqIdx.value
    when (io.enq.req(i).valid && io.enq.canAccept && io.enq.sqCanAccept && !io.brqRedirect.valid) {
      uop(index) := io.enq.req(i).bits
      allocated(index) := true.B
      datavalid(index) := false.B
      writebacked(index) := false.B
      commited(index) := false.B
      miss(index) := false.B
      listening(index) := false.B
      pending(index) := false.B
    }
    io.enq.resp(i) := lqIdx
  }
  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n")

  /**
    * Writeback load from load units
    *
    * Most load instructions write back to the regfile at the same time.
    * However,
    *   (1) For an mmio instruction with exceptions, it writes back to ROB immediately.
    *   (2) For an mmio instruction without exceptions, it does not write back.
    * The mmio instruction will be sent to the lower memory level when it reaches ROB's head.
    * After the uncache response arrives, it writes back through the arbiter with loadUnit.
    *   (3) For a cache miss, it is marked as miss and sent to dcache later.
    * After the cache refill, it writes back through the arbiter with loadUnit.
    */
  for (i <- 0 until LoadPipelineWidth) {
    dataModule.io.wb(i).wen := false.B
    when(io.loadIn(i).fire()) {
      when(io.loadIn(i).bits.miss) {
        XSInfo(io.loadIn(i).valid, "load miss write to lq idx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x roll %x exc %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio,
          io.loadIn(i).bits.rollback,
          io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
        )
      }.otherwise {
        XSInfo(io.loadIn(i).valid, "load hit write to cdb lqidx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x roll %x exc %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio,
          io.loadIn(i).bits.rollback,
          io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
        )
      }
      val loadWbIndex = io.loadIn(i).bits.uop.lqIdx.value
      datavalid(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      writebacked(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio

      val loadWbData = Wire(new LsqEntry)
      loadWbData.paddr := io.loadIn(i).bits.paddr
      loadWbData.vaddr := io.loadIn(i).bits.vaddr
      loadWbData.mask := io.loadIn(i).bits.mask
      loadWbData.data := io.loadIn(i).bits.data // for mmio / misc / debug
      loadWbData.mmio := io.loadIn(i).bits.mmio
      loadWbData.fwdMask := io.loadIn(i).bits.forwardMask
      loadWbData.fwdData := io.loadIn(i).bits.forwardData
      loadWbData.exception := io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
      dataModule.io.wbWrite(i, loadWbIndex, loadWbData)
      dataModule.io.wb(i).wen := true.B

      val dcacheMissed = io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      miss(loadWbIndex) := dcacheMissed && !io.loadIn(i).bits.uop.cf.exceptionVec.asUInt.orR
      listening(loadWbIndex) := dcacheMissed
      pending(loadWbIndex) := io.loadIn(i).bits.mmio && !io.loadIn(i).bits.uop.cf.exceptionVec.asUInt.orR
      uop(loadWbIndex).debugInfo.issueTime := io.loadIn(i).bits.uop.debugInfo.issueTime
    }
  }
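
  // Entry status after load-unit writeback:
  //   cache hit, non-mmio: datavalid = writebacked = 1 (entry is finished)
  //   dcache miss        : miss = listening = 1 (request refill, then wait)
  //   mmio               : pending = 1 (executed only at the roq head)
  // An exception clears miss/pending so the faulting access is never replayed
  // or issued to memory.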

  /**
    * Cache miss request
    *
    * (1) writeback: miss
    * (2) send to dcache: listening
    * (3) dcache response: datavalid
    * (4) writeback to ROB: writeback
    */
  val inflightReqs = RegInit(VecInit(Seq.fill(cfg.nLoadMissEntries)(0.U.asTypeOf(new InflightBlockInfo))))
  val inflightReqFull = inflightReqs.map(req => req.valid).reduce(_&&_)
  val reqBlockIndex = PriorityEncoder(~VecInit(inflightReqs.map(req => req.valid)).asUInt)

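  // A queue entry may issue a miss request only when no inflight request
  // already covers its cache block, so duplicate refill requests are never
  // sent for a block that is already being fetched.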
  val missRefillSelVec = VecInit(
    (0 until LoadQueueSize).map{ i =>
      val inflight = inflightReqs.map(req => req.valid && req.block_addr === get_block_addr(dataModule.io.rdata(i).paddr)).reduce(_||_)
      allocated(i) && miss(i) && !inflight
    })

  val missRefillSel = getFirstOne(missRefillSelVec, deqMask)
  val missRefillBlockAddr = get_block_addr(dataModule.io.rdata(missRefillSel).paddr)
  io.dcache.req.valid := missRefillSelVec.asUInt.orR
  io.dcache.req.bits.cmd := MemoryOpConstants.M_XRD
  io.dcache.req.bits.addr := missRefillBlockAddr
  io.dcache.req.bits.data := DontCare
  io.dcache.req.bits.mask := DontCare

  io.dcache.req.bits.meta.id       := DontCare
  io.dcache.req.bits.meta.vaddr    := DontCare // dataModule.io.rdata(missRefillSel).vaddr
  io.dcache.req.bits.meta.paddr    := missRefillBlockAddr
  io.dcache.req.bits.meta.uop      := uop(missRefillSel)
  io.dcache.req.bits.meta.mmio     := false.B // dataModule.io.rdata(missRefillSel).mmio
  io.dcache.req.bits.meta.tlb_miss := false.B
  io.dcache.req.bits.meta.mask     := DontCare
  io.dcache.req.bits.meta.replay   := false.B

  io.dcache.resp.ready := true.B

  assert(!(dataModule.io.rdata(missRefillSel).mmio && io.dcache.req.valid))

  when(io.dcache.req.fire()) {
    miss(missRefillSel) := false.B
    listening(missRefillSel) := true.B

    // mark this block as inflight
    inflightReqs(reqBlockIndex).valid := true.B
    inflightReqs(reqBlockIndex).block_addr := missRefillBlockAddr
    assert(!inflightReqs(reqBlockIndex).valid)
  }

  when(io.dcache.resp.fire()) {
    val inflight = inflightReqs.map(req => req.valid && req.block_addr === get_block_addr(io.dcache.resp.bits.meta.paddr)).reduce(_||_)
    assert(inflight)
    for (i <- 0 until cfg.nLoadMissEntries) {
      when (inflightReqs(i).valid && inflightReqs(i).block_addr === get_block_addr(io.dcache.resp.bits.meta.paddr)) {
        inflightReqs(i).valid := false.B
      }
    }
  }

  when(io.dcache.req.fire()){
    XSDebug("miss req: pc:0x%x roqIdx:%d lqIdx:%d (p)addr:0x%x vaddr:0x%x\n",
      io.dcache.req.bits.meta.uop.cf.pc, io.dcache.req.bits.meta.uop.roqIdx.asUInt, io.dcache.req.bits.meta.uop.lqIdx.asUInt,
      io.dcache.req.bits.addr, io.dcache.req.bits.meta.vaddr
    )
  }

  when(io.dcache.resp.fire()){
    XSDebug("miss resp: pc:0x%x roqIdx:%d lqIdx:%d (p)addr:0x%x data %x\n",
      io.dcache.resp.bits.meta.uop.cf.pc, io.dcache.resp.bits.meta.uop.roqIdx.asUInt, io.dcache.resp.bits.meta.uop.lqIdx.asUInt,
      io.dcache.resp.bits.meta.paddr, io.dcache.resp.bits.data
    )
  }

  // Refill 64 bit in a cycle
  // Refill data comes back from io.dcache.resp
  dataModule.io.refill.dcache := io.dcache.resp.bits

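  // Wake up every listening entry whose block address matches the refill:
  // dataModule merges the refill data with each entry's saved fwdMask/fwdData
  // (see LSQueueData), after which the entry is ready for writeback.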
  (0 until LoadQueueSize).map(i => {
    val blockMatch = get_block_addr(dataModule.io.rdata(i).paddr) === io.dcache.resp.bits.meta.paddr
    dataModule.io.refill.wen(i) := false.B
    when(allocated(i) && listening(i) && blockMatch && io.dcache.resp.fire()) {
      dataModule.io.refill.wen(i) := true.B
      datavalid(i) := true.B
      listening(i) := false.B
    }
  })

  // writeback up to 2 missed load insts to CDB
  // pick the first 2 loads whose data has been refilled and write them back to the CDB
  val loadWbSelVec = VecInit((0 until LoadQueueSize).map(i => {
    allocated(i) && datavalid(i) && !writebacked(i)
  })).asUInt() // use UInt instead of Vec to reduce verilog lines
  val loadWbSel = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W)))
  val loadWbSelV = Wire(Vec(LoadPipelineWidth, Bool()))
  val lselvec0 = PriorityEncoderOH(loadWbSelVec)
  val lselvec1 = PriorityEncoderOH(loadWbSelVec & (~lselvec0).asUInt)
  loadWbSel(0) := OHToUInt(lselvec0)
  loadWbSelV(0) := lselvec0.orR
  loadWbSel(1) := OHToUInt(lselvec1)
  loadWbSelV(1) := lselvec1.orR
  (0 until LoadPipelineWidth).map(i => {
    // data select
    val rdata = dataModule.io.rdata(loadWbSel(i)).data
    val func = uop(loadWbSel(i)).ctrl.fuOpType
    val raddr = dataModule.io.rdata(loadWbSel(i)).paddr
    val rdataSel = LookupTree(raddr(2, 0), List(
      "b000".U -> rdata(63, 0),
      "b001".U -> rdata(63, 8),
      "b010".U -> rdata(63, 16),
      "b011".U -> rdata(63, 24),
      "b100".U -> rdata(63, 32),
      "b101".U -> rdata(63, 40),
      "b110".U -> rdata(63, 48),
      "b111".U -> rdata(63, 56)
    ))
    val rdataPartialLoad = LookupTree(func, List(
        LSUOpType.lb   -> SignExt(rdataSel(7, 0) , XLEN),
        LSUOpType.lh   -> SignExt(rdataSel(15, 0), XLEN),
        LSUOpType.lw   -> SignExt(rdataSel(31, 0), XLEN),
        LSUOpType.ld   -> SignExt(rdataSel(63, 0), XLEN),
        LSUOpType.lbu  -> ZeroExt(rdataSel(7, 0) , XLEN),
        LSUOpType.lhu  -> ZeroExt(rdataSel(15, 0), XLEN),
        LSUOpType.lwu  -> ZeroExt(rdataSel(31, 0), XLEN),
        LSUOpType.flw  -> boxF32ToF64(rdataSel(31, 0))
    ))
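    // e.g. an lh whose paddr ends in 0b010 takes rdataSel = rdata(63, 16)
    // (a 16-bit shift within the 64-bit word) and then sign-extends
    // rdataSel(15, 0) to XLEN bits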
    io.ldout(i).bits.uop := uop(loadWbSel(i))
    io.ldout(i).bits.uop.cf.exceptionVec := dataModule.io.rdata(loadWbSel(i)).exception.asBools
    io.ldout(i).bits.uop.lqIdx := loadWbSel(i).asTypeOf(new LqPtr)
    io.ldout(i).bits.data := rdataPartialLoad
    io.ldout(i).bits.redirectValid := false.B
    io.ldout(i).bits.redirect := DontCare
    io.ldout(i).bits.brUpdate := DontCare
    io.ldout(i).bits.debug.isMMIO := dataModule.io.rdata(loadWbSel(i)).mmio
    io.ldout(i).bits.fflags := DontCare
    io.ldout(i).valid := loadWbSelVec(loadWbSel(i)) && loadWbSelV(i)
    when(io.ldout(i).fire()) {
      writebacked(loadWbSel(i)) := true.B
      XSInfo("load miss write to cdb roqidx %d lqidx %d pc 0x%x paddr %x data %x mmio %x\n",
        io.ldout(i).bits.uop.roqIdx.asUInt,
        io.ldout(i).bits.uop.lqIdx.asUInt,
        io.ldout(i).bits.uop.cf.pc,
        dataModule.io.rdata(loadWbSel(i)).paddr,
        dataModule.io.rdata(loadWbSel(i)).data,
        dataModule.io.rdata(loadWbSel(i)).mmio
      )
    }
  })

  /**
    * Load commits
    *
    * When a load commits, mark it as !allocated and move deqPtrExt forward.
    */
  (0 until CommitWidth).map(i => {
    when(loadCommit(i)) {
      allocated(mcommitIdx(i)) := false.B
      XSDebug("load commit %d: idx %d %x\n", i.U, mcommitIdx(i), uop(mcommitIdx(i)).cf.pc)
    }
  })
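
  // getFirstOne returns the index of the first set bit of `mask` at or above
  // the position encoded by `startMask` (whose set bits cover [0, start)),
  // wrapping to the lowest set bit when no such bit exists. With
  // startMask = deqMask this yields the oldest matching entry in
  // circular-queue order.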

  def getFirstOne(mask: Vec[Bool], startMask: UInt) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    PriorityEncoder(Mux(highBitsUint.orR(), highBitsUint, mask.asUInt))
  }

  def getOldestInTwo(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    assert(valid.length == 2)
    Mux(valid(0) && valid(1),
      Mux(isAfter(uop(0).roqIdx, uop(1).roqIdx), uop(1), uop(0)),
      Mux(valid(0) && !valid(1), uop(0), uop(1)))
  }
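
  // getAfterMask(i)(j) is true when candidate i must not be chosen over
  // candidate j: either i is invalid, or both are valid and i is younger
  // (its roqIdx is after j's). The rollback logic below uses this matrix to
  // pick the oldest valid violation.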

  def getAfterMask(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    val length = valid.length
    (0 until length).map(i => {
      (0 until length).map(j => {
        Mux(valid(i) && valid(j),
          isAfter(uop(i).roqIdx, uop(j).roqIdx),
          Mux(!valid(i), true.B, false.B))
      })
    })
  }

  /**
    * Memory violation detection
    *
    * When a store writes back, it searches the LoadQueue for younger load
    * instructions with the same physical address. Those loads read stale
    * data and need to be re-executed.
    *
    * Cycle 0: Store Writeback
    *   Generate the match vector for the store address with rangeMask(stPtr, enqPtr).
    *   Besides, load instructions in LoadUnit_S1 and S2 are also checked.
    * Cycle 1: Redirect Generation
    *   There are three possible types of violations. Choose the oldest load.
    *   Set io.rollback according to the detected violation.
    */
  io.load_s1 := DontCare
  def detectRollback(i: Int) = {
    val startIndex = io.storeIn(i).bits.uop.lqIdx.value
    val lqIdxMask = UIntToMask(startIndex, LoadQueueSize)
    val xorMask = lqIdxMask ^ enqMask
    val sameFlag = io.storeIn(i).bits.uop.lqIdx.flag === enqPtrExt(0).flag
    val toEnqPtrMask = Mux(sameFlag, xorMask, ~xorMask)
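    // lqIdxMask ^ enqMask sets exactly the bits between the two pointer
    // values; when the pointers share a wrap flag this region is
    // [stPtr, enqPtr), otherwise the complement is taken. Either way,
    // toEnqPtrMask marks the lq entries younger than the incoming store.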

    // check if a load already in the lq needs to be rolled back
    val lqViolationVec = RegNext(VecInit((0 until LoadQueueSize).map(j => {
      val addrMatch = allocated(j) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === dataModule.io.rdata(j).paddr(PAddrBits - 1, 3)
      val entryNeedCheck = toEnqPtrMask(j) && addrMatch && (datavalid(j) || listening(j) || miss(j))
      // TODO: update refilled data
      val violationVec = (0 until 8).map(k => dataModule.io.rdata(j).mask(k) && io.storeIn(i).bits.mask(k))
      Cat(violationVec).orR() && entryNeedCheck
    })))
    val lqViolation = lqViolationVec.asUInt().orR()
    val lqViolationIndex = getFirstOne(lqViolationVec, RegNext(lqIdxMask))
    val lqViolationUop = uop(lqViolationIndex)
    // lqViolationUop.lqIdx.flag := deqMask(lqViolationIndex) ^ deqPtrExt.flag
    // lqViolationUop.lqIdx.value := lqViolationIndex
    XSDebug(lqViolation, p"${Binary(Cat(lqViolationVec))}, $startIndex, $lqViolationIndex\n")

    // when a load and a store write back to the roq together, check if rollback is needed
    val wbViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.loadIn(j).valid &&
        isAfter(io.loadIn(j).bits.uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.loadIn(j).bits.paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.loadIn(j).bits.mask).orR
    })))
    val wbViolation = wbViolationVec.asUInt().orR()
    val wbViolationUop = getOldestInTwo(wbViolationVec, RegNext(VecInit(io.loadIn.map(_.bits.uop))))
    XSDebug(wbViolation, p"${Binary(Cat(wbViolationVec))}, $wbViolationUop\n")

    // check if rollback is needed for a load in LoadUnit S1
    val l1ViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.load_s1(j).valid && // L1 valid
        isAfter(io.load_s1(j).uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.load_s1(j).paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.load_s1(j).mask).orR
    })))
    val l1Violation = l1ViolationVec.asUInt().orR()
    val l1ViolationUop = getOldestInTwo(l1ViolationVec, RegNext(VecInit(io.load_s1.map(_.uop))))
    XSDebug(l1Violation, p"${Binary(Cat(l1ViolationVec))}, $l1ViolationUop\n")

    val rollbackValidVec = Seq(lqViolation, wbViolation, l1Violation)
    val rollbackUopVec = Seq(lqViolationUop, wbViolationUop, l1ViolationUop)

    val mask = getAfterMask(rollbackValidVec, rollbackUopVec)
    val oneAfterZero = mask(1)(0)
    val rollbackUop = Mux(oneAfterZero && mask(2)(0),
      rollbackUopVec(0),
      Mux(!oneAfterZero && mask(2)(1), rollbackUopVec(1), rollbackUopVec(2)))

    XSDebug(
      l1Violation,
      "need rollback (l1 load) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, l1ViolationUop.roqIdx.asUInt
    )
    XSDebug(
      lqViolation,
      "need rollback (ld wb before store) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, lqViolationUop.roqIdx.asUInt
    )
    XSDebug(
      wbViolation,
      "need rollback (ld/st wb together) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, wbViolationUop.roqIdx.asUInt
    )

    (RegNext(io.storeIn(i).valid) && Cat(rollbackValidVec).orR, rollbackUop)
  }
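
  // Timing note: the violation vectors above are all registered (RegNext),
  // so a rollback request appears one cycle after the store writes back,
  // matching the two-cycle scheme described in the comment above.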

  // rollback check
  val rollback = Wire(Vec(StorePipelineWidth, Valid(new MicroOp)))
  for (i <- 0 until StorePipelineWidth) {
    val detectedRollback = detectRollback(i)
    rollback(i).valid := detectedRollback._1
    rollback(i).bits := detectedRollback._2
  }

  def rollbackSel(a: Valid[MicroOp], b: Valid[MicroOp]): ValidIO[MicroOp] = {
    Mux(
      a.valid,
      Mux(
        b.valid,
        Mux(isAfter(a.bits.roqIdx, b.bits.roqIdx), b, a), // a,b both valid, sel oldest
        a // sel a
      ),
      b // sel b
    )
  }

  val rollbackSelected = ParallelOperation(rollback, rollbackSel)
  val lastCycleRedirect = RegNext(io.brqRedirect)

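  // A rollback is dropped when last cycle's redirect already flushed the
  // offending load: either that redirect was unconditional (e.g. an exception
  // flush), or the load to be replayed is younger than the redirect point.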
  // Note that we use roqIdx - 1.U to flush the load instruction itself.
  // Thus, even if last cycle's roqIdx equals this cycle's roqIdx, the redirect still triggers.
  io.rollback.valid := rollbackSelected.valid &&
    (!lastCycleRedirect.valid || !isAfter(rollbackSelected.bits.roqIdx, lastCycleRedirect.bits.roqIdx)) &&
    !(lastCycleRedirect.valid && lastCycleRedirect.bits.isUnconditional())

  io.rollback.bits.roqIdx := rollbackSelected.bits.roqIdx
  io.rollback.bits.level := RedirectLevel.flush
  io.rollback.bits.interrupt := DontCare
  io.rollback.bits.pc := DontCare
  io.rollback.bits.target := rollbackSelected.bits.cf.pc
  io.rollback.bits.brTag := rollbackSelected.bits.brTag

  when(io.rollback.valid) {
    XSDebug("Mem rollback: pc %x roqidx %d\n", io.rollback.bits.pc, io.rollback.bits.roqIdx.asUInt)
  }

  /**
    * Memory mapped IO / other uncached operations
    */
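  // An mmio load is issued to the uncache channel only when it reaches the
  // head of the roq (roqDeqPtr matches) and the head commit slot holds a
  // load: uncached accesses may have side effects and must not be issued
  // speculatively.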
  io.uncache.req.valid := pending(deqPtr) && allocated(deqPtr) &&
    io.commits.info(0).commitType === CommitType.LOAD &&
    io.roqDeqPtr === uop(deqPtr).roqIdx &&
    !io.commits.isWalk

  io.uncache.req.bits.cmd  := MemoryOpConstants.M_XRD
  io.uncache.req.bits.addr := dataModule.io.rdata(deqPtr).paddr
  io.uncache.req.bits.data := dataModule.io.rdata(deqPtr).data
  io.uncache.req.bits.mask := dataModule.io.rdata(deqPtr).mask

  io.uncache.req.bits.meta.id       := DontCare
  io.uncache.req.bits.meta.vaddr    := DontCare
  io.uncache.req.bits.meta.paddr    := dataModule.io.rdata(deqPtr).paddr
  io.uncache.req.bits.meta.uop      := uop(deqPtr)
  io.uncache.req.bits.meta.mmio     := true.B
  io.uncache.req.bits.meta.tlb_miss := false.B
  io.uncache.req.bits.meta.mask     := dataModule.io.rdata(deqPtr).mask
  io.uncache.req.bits.meta.replay   := false.B

  io.uncache.resp.ready := true.B

  when (io.uncache.req.fire()) {
    pending(deqPtr) := false.B

    XSDebug("uncache req: pc %x addr %x data %x op %x mask %x\n",
      uop(deqPtr).cf.pc,
      io.uncache.req.bits.addr,
      io.uncache.req.bits.data,
      io.uncache.req.bits.cmd,
      io.uncache.req.bits.mask
    )
  }

  dataModule.io.uncache.wen := false.B
  when(io.uncache.resp.fire()){
    datavalid(deqPtr) := true.B
    dataModule.io.uncacheWrite(deqPtr, io.uncache.resp.bits.data(XLEN-1, 0))
    dataModule.io.uncache.wen := true.B

    XSDebug("uncache resp: data %x\n", io.uncache.resp.bits.data)
  }

  // Read vaddr for mem exception
  io.exceptionAddr.vaddr := dataModule.io.rdata(io.exceptionAddr.lsIdx.lqIdx.value).vaddr

  // misprediction recovery / exception redirect
  // invalidate lq entries using roqIdx
  val needCancel = Wire(Vec(LoadQueueSize, Bool()))
  for (i <- 0 until LoadQueueSize) {
    needCancel(i) := uop(i).roqIdx.needFlush(io.brqRedirect) && allocated(i) && !commited(i)
    when (needCancel(i)) {
      allocated(i) := false.B
    }
  }

  /**
    * update pointers
    */
  val lastCycleCancelCount = PopCount(RegNext(needCancel))
  // when io.brqRedirect.valid, we don't allow enqueue even though it may fire.
  val enqNumber = Mux(io.enq.canAccept && io.enq.sqCanAccept && !io.brqRedirect.valid, PopCount(io.enq.req.map(_.valid)), 0.U)
  when (lastCycleRedirect.valid) {
    // we recover the pointers in the next cycle after redirect
    enqPtrExt := VecInit(enqPtrExt.map(_ - lastCycleCancelCount))
  }.otherwise {
    enqPtrExt := VecInit(enqPtrExt.map(_ + enqNumber))
  }

  val commitCount = PopCount(loadCommit)
  deqPtrExt := deqPtrExt + commitCount
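
  // The incremental count below is wrong while a flush is in flight (the
  // number of cancelled entries is only known one cycle after the redirect),
  // so two cycles after a redirect validCounter is resynchronized from the
  // true pointer distance.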

  val lastLastCycleRedirect = RegNext(lastCycleRedirect.valid)
  val trueValidCounter = distanceBetween(enqPtrExt(0), deqPtrExt)
  validCounter := Mux(lastLastCycleRedirect,
    trueValidCounter,
    validCounter + enqNumber - commitCount
  )
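
  // allowEnqueue is a registered signal, so it must stay conservative: accept
  // new dispatches only while at least RenameWidth entries remain free, which
  // guarantees that a full rename group arriving next cycle always fits.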
  allowEnqueue := Mux(io.brqRedirect.valid,
    false.B,
    Mux(lastLastCycleRedirect,
      trueValidCounter <= (LoadQueueSize - RenameWidth).U,
      validCounter + enqNumber <= (LoadQueueSize - RenameWidth).U
    )
  )

  // debug info
  XSDebug("enqPtrExt %d:%d deqPtrExt %d:%d\n", enqPtrExt(0).flag, enqPtr, deqPtrExt.flag, deqPtr)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when(flag) {
      XSDebug(false, true.B, name)
    }.otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  for (i <- 0 until LoadQueueSize) {
    if (i % 4 == 0) XSDebug("")
    XSDebug(false, true.B, "%x [%x] ", uop(i).cf.pc, dataModule.io.rdata(i).paddr)
    PrintFlag(allocated(i), "a")
    PrintFlag(allocated(i) && datavalid(i), "v")
    PrintFlag(allocated(i) && writebacked(i), "w")
    PrintFlag(allocated(i) && commited(i), "c")
    PrintFlag(allocated(i) && miss(i), "m")
    PrintFlag(allocated(i) && listening(i), "l")
    PrintFlag(allocated(i) && pending(i), "p")
    XSDebug(false, true.B, " ")
    if (i % 4 == 3 || i == LoadQueueSize - 1) XSDebug(false, true.B, "\n")
  }

}