// XiangShan/src/main/scala/xiangshan/mem/lsqueue/LoadQueue.scala (revision 7f1506e34f4f1556f09fd3d96108d0b558ad4881)
package xiangshan.mem

import chisel3._
import chisel3.util._
import freechips.rocketchip.tile.HasFPUParameters
import utils._
import xiangshan._
import xiangshan.cache._
import xiangshan.backend.LSUOpType
import xiangshan.backend.roq.RoqPtr


class LqPtr extends CircularQueuePtr(LqPtr.LoadQueueSize)

object LqPtr extends HasXSParameter {
  def apply(f: Bool, v: UInt): LqPtr = {
    val ptr = Wire(new LqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}

trait HasLoadHelper { this: XSModule =>
  def rdataHelper(uop: MicroOp, rdata: UInt): UInt = {
    val lwIntData = SignExt(rdata(31, 0), XLEN)
    val ldIntData = SignExt(rdata(63, 0), XLEN)
    val lwFpData = recode(rdata(31, 0), S)
    val ldFpData = recode(rdata(63, 0), D)
    val fpWen = uop.ctrl.fpWen
    LookupTree(uop.ctrl.fuOpType, List(
      LSUOpType.lb   -> SignExt(rdata(7, 0) , XLEN),
      LSUOpType.lh   -> SignExt(rdata(15, 0), XLEN),
      LSUOpType.lw   -> Mux(fpWen, lwFpData, lwIntData),
      LSUOpType.ld   -> Mux(fpWen, ldFpData, ldIntData),
      LSUOpType.lbu  -> ZeroExt(rdata(7, 0) , XLEN),
      LSUOpType.lhu  -> ZeroExt(rdata(15, 0), XLEN),
      LSUOpType.lwu  -> ZeroExt(rdata(31, 0), XLEN)
    ))
  }
}
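
// Illustrative example (not in the original source): for lb with the addressed
// byte 0xf0, rdataHelper sign-extends to 0xffff_ffff_ffff_fff0, while lbu
// zero-extends to 0x0000_0000_0000_00f0; for lw/ld with fpWen set, the raw
// bits are additionally recoded into the internal hardfloat format by recode().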


// Load Queue
class LoadQueue extends XSModule
  with HasDCacheParameters
  with HasCircularQueuePtrHelper
  with HasLoadHelper
{
  val io = IO(new Bundle() {
    val enq = new Bundle() {
      val canAccept = Output(Bool())
      val req = Vec(RenameWidth, Flipped(ValidIO(new MicroOp)))
      val resp = Vec(RenameWidth, Output(new LqPtr))
    }
    val brqRedirect = Input(Valid(new Redirect))
    val loadIn = Vec(LoadPipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle))) // FIXME: Valid() only
    val ldout = Vec(2, DecoupledIO(new ExuOutput)) // writeback load
    val forward = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO))
    val commits = Flipped(new RoqCommitIO)
    val rollback = Output(Valid(new Redirect)) // replay now starts from load instead of store
    val dcache = new DCacheLineIO
    val uncache = new DCacheWordIO
    val roqDeqPtr = Input(new RoqPtr)
    val exceptionAddr = new ExceptionAddrIO
    // val refill = Flipped(Valid(new DCacheLineReq ))
  })

  val uop = Reg(Vec(LoadQueueSize, new MicroOp))
  // val data = Reg(Vec(LoadQueueSize, new LsRoqEntry))
  val dataModule = Module(new LSQueueData(LoadQueueSize, LoadPipelineWidth))
  dataModule.io := DontCare
  val allocated = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // lq entry has been allocated
  val datavalid = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // data is valid
  val writebacked = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // inst has been written back to CDB
  val commited = Reg(Vec(LoadQueueSize, Bool())) // inst has been committed by roq
  val miss = Reg(Vec(LoadQueueSize, Bool())) // load inst missed, waiting for miss queue to accept miss request
  val listening = Reg(Vec(LoadQueueSize, Bool())) // waiting for refill result
  val pending = Reg(Vec(LoadQueueSize, Bool())) // mmio pending: inst is an mmio inst, it will not be executed until it reaches the head of roq
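
  // Entry lifecycle (summary of the logic below): dispatch sets allocated;
  // a hit load sets datavalid and writebacked at writeback; a missed load
  // sets miss/listening until the matching refill sets datavalid, after which
  // the entry is written back to the CDB; an mmio load stays pending until it
  // reaches the roq head; commit clears allocated.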

  val enqPtrExt = RegInit(0.U.asTypeOf(new LqPtr))
  val deqPtrExt = RegInit(0.U.asTypeOf(new LqPtr))
  val enqPtr = enqPtrExt.value
  val deqPtr = deqPtrExt.value
  val sameFlag = enqPtrExt.flag === deqPtrExt.flag
  val isEmpty = enqPtr === deqPtr && sameFlag
  val isFull = enqPtr === deqPtr && !sameFlag
  val allowIn = !isFull

  val loadCommit = (0 until CommitWidth).map(i => io.commits.valid(i) && !io.commits.isWalk && io.commits.uop(i).ctrl.commitType === CommitType.LOAD)
  val mcommitIdx = (0 until CommitWidth).map(i => io.commits.uop(i).lqIdx.value)

  val deqMask = UIntToMask(deqPtr, LoadQueueSize)
  val enqMask = UIntToMask(enqPtr, LoadQueueSize)
  val enqDeqMask1 = deqMask ^ enqMask
  val enqDeqMask = Mux(sameFlag, enqDeqMask1, ~enqDeqMask1)
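  // Illustrative example (assuming LoadQueueSize = 8): with deqPtr = 2 and
  // enqPtr = 5, deqMask = 0b0000_0011 and enqMask = 0b0001_1111, so
  // enqDeqMask1 = 0b0001_1100 marks entries 2..4 as occupied. When the flags
  // differ the queue has wrapped, and ~enqDeqMask1 marks the occupied entries.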

  // Enqueue at dispatch
  val validEntries = distanceBetween(enqPtrExt, deqPtrExt)
  val firedDispatch = io.enq.req.map(_.valid)
  io.enq.canAccept := validEntries <= (LoadQueueSize - RenameWidth).U
  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(firedDispatch))}\n")
  for (i <- 0 until RenameWidth) {
    val offset = if (i == 0) 0.U else PopCount((0 until i).map(firedDispatch(_)))
    val lqIdx = enqPtrExt + offset
    val index = lqIdx.value
    when(io.enq.req(i).valid) {
      uop(index) := io.enq.req(i).bits
      allocated(index) := true.B
      datavalid(index) := false.B
      writebacked(index) := false.B
      commited(index) := false.B
      miss(index) := false.B
      listening(index) := false.B
      pending(index) := false.B
    }
    io.enq.resp(i) := lqIdx

    XSError(!io.enq.canAccept && io.enq.req(i).valid, "should not be valid when not ready\n")
  }

  when(Cat(firedDispatch).orR) {
    enqPtrExt := enqPtrExt + PopCount(firedDispatch)
    XSInfo("dispatched %d insts to lq\n", PopCount(firedDispatch))
  }

  // writeback load
  (0 until LoadPipelineWidth).foreach(i => {
    dataModule.io.wb(i).wen := false.B
    when(io.loadIn(i).fire()) {
      when(io.loadIn(i).bits.miss) {
        XSInfo(io.loadIn(i).valid, "load miss write to lq idx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x roll %x exc %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio,
          io.loadIn(i).bits.rollback,
          io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
        )
      }.otherwise {
        XSInfo(io.loadIn(i).valid, "load hit write to cdb lqidx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x roll %x exc %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio,
          io.loadIn(i).bits.rollback,
          io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
        )
      }
      val loadWbIndex = io.loadIn(i).bits.uop.lqIdx.value
      datavalid(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      writebacked(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      allocated(loadWbIndex) := !io.loadIn(i).bits.uop.cf.exceptionVec.asUInt.orR

      val loadWbData = Wire(new LsqEntry)
      loadWbData.paddr := io.loadIn(i).bits.paddr
      loadWbData.vaddr := io.loadIn(i).bits.vaddr
      loadWbData.mask := io.loadIn(i).bits.mask
      loadWbData.data := io.loadIn(i).bits.data // for mmio / misc / debug
      loadWbData.mmio := io.loadIn(i).bits.mmio
      loadWbData.fwdMask := io.loadIn(i).bits.forwardMask
      loadWbData.fwdData := io.loadIn(i).bits.forwardData
      loadWbData.exception := io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
      dataModule.io.wbWrite(i, loadWbIndex, loadWbData)
      dataModule.io.wb(i).wen := true.B

      val dcacheMissed = io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      miss(loadWbIndex) := dcacheMissed
      listening(loadWbIndex) := dcacheMissed
      pending(loadWbIndex) := io.loadIn(i).bits.mmio
    }
  })

  // cache miss request
  val inflightReqs = RegInit(VecInit(Seq.fill(cfg.nLoadMissEntries)(0.U.asTypeOf(new InflightBlockInfo))))
  val inflightReqFull = inflightReqs.map(req => req.valid).reduce(_&&_)
  val reqBlockIndex = PriorityEncoder(~VecInit(inflightReqs.map(req => req.valid)).asUInt)

  val missRefillSelVec = VecInit(
    (0 until LoadQueueSize).map{ i =>
      val inflight = inflightReqs.map(req => req.valid && req.block_addr === get_block_addr(dataModule.io.rdata(i).paddr)).reduce(_||_)
      allocated(i) && miss(i) && !inflight
    })
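  // An entry is eligible for a miss request only if no in-flight request
  // already covers its cache block, so refills for loads to the same block
  // are de-duplicated.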

  val missRefillSel = getFirstOne(missRefillSelVec, deqMask)
  val missRefillBlockAddr = get_block_addr(dataModule.io.rdata(missRefillSel).paddr)
  io.dcache.req.valid := missRefillSelVec.asUInt.orR
  io.dcache.req.bits.cmd := MemoryOpConstants.M_XRD
  io.dcache.req.bits.addr := missRefillBlockAddr
  io.dcache.req.bits.data := DontCare
  io.dcache.req.bits.mask := DontCare

  io.dcache.req.bits.meta.id       := DontCare
  io.dcache.req.bits.meta.vaddr    := DontCare // dataModule.io.rdata(missRefillSel).vaddr
  io.dcache.req.bits.meta.paddr    := missRefillBlockAddr
  io.dcache.req.bits.meta.uop      := uop(missRefillSel)
  io.dcache.req.bits.meta.mmio     := false.B // dataModule.io.rdata(missRefillSel).mmio
  io.dcache.req.bits.meta.tlb_miss := false.B
  io.dcache.req.bits.meta.mask     := DontCare
  io.dcache.req.bits.meta.replay   := false.B

  io.dcache.resp.ready := true.B

  assert(!(dataModule.io.rdata(missRefillSel).mmio && io.dcache.req.valid))

  when(io.dcache.req.fire()) {
    miss(missRefillSel) := false.B
    listening(missRefillSel) := true.B

    // mark this block as inflight
    inflightReqs(reqBlockIndex).valid := true.B
    inflightReqs(reqBlockIndex).block_addr := missRefillBlockAddr
    // the assert reads the register's current-cycle value, so it checks that
    // the selected slot was still free before being claimed
    assert(!inflightReqs(reqBlockIndex).valid)
  }

  when(io.dcache.resp.fire()) {
    val inflight = inflightReqs.map(req => req.valid && req.block_addr === get_block_addr(io.dcache.resp.bits.meta.paddr)).reduce(_||_)
    assert(inflight)
    for (i <- 0 until cfg.nLoadMissEntries) {
      when (inflightReqs(i).valid && inflightReqs(i).block_addr === get_block_addr(io.dcache.resp.bits.meta.paddr)) {
        inflightReqs(i).valid := false.B
      }
    }
  }


  when(io.dcache.req.fire()){
    XSDebug("miss req: pc:0x%x roqIdx:%d lqIdx:%d (p)addr:0x%x vaddr:0x%x\n",
      io.dcache.req.bits.meta.uop.cf.pc, io.dcache.req.bits.meta.uop.roqIdx.asUInt, io.dcache.req.bits.meta.uop.lqIdx.asUInt,
      io.dcache.req.bits.addr, io.dcache.req.bits.meta.vaddr
    )
  }

  when(io.dcache.resp.fire()){
    XSDebug("miss resp: pc:0x%x roqIdx:%d lqIdx:%d (p)addr:0x%x data %x\n",
      io.dcache.resp.bits.meta.uop.cf.pc, io.dcache.resp.bits.meta.uop.roqIdx.asUInt, io.dcache.resp.bits.meta.uop.lqIdx.asUInt,
      io.dcache.resp.bits.meta.paddr, io.dcache.resp.bits.data
    )
  }

  // Refill 64 bit in a cycle
  // Refill data comes back from io.dcache.resp
  dataModule.io.refill.dcache := io.dcache.resp.bits

  (0 until LoadQueueSize).foreach(i => {
    val blockMatch = get_block_addr(dataModule.io.rdata(i).paddr) === io.dcache.resp.bits.meta.paddr
    dataModule.io.refill.wen(i) := false.B
    when(allocated(i) && listening(i) && blockMatch && io.dcache.resp.fire()) {
      dataModule.io.refill.wen(i) := true.B
      datavalid(i) := true.B
      listening(i) := false.B
    }
  })

  // writeback up to 2 missed load insts to CDB
  // just randomly pick 2 missed loads (data refilled) and write them back to the CDB
  val loadWbSelVec = VecInit((0 until LoadQueueSize).map(i => {
    allocated(i) && datavalid(i) && !writebacked(i)
  })).asUInt() // use UInt instead of Vec to reduce Verilog lines
  val loadWbSel = Wire(Vec(StorePipelineWidth, UInt(log2Up(LoadQueueSize).W)))
  val loadWbSelV = Wire(Vec(StorePipelineWidth, Bool()))
  val lselvec0 = PriorityEncoderOH(loadWbSelVec)
  val lselvec1 = PriorityEncoderOH(loadWbSelVec & (~lselvec0).asUInt)
  loadWbSel(0) := OHToUInt(lselvec0)
  loadWbSelV(0) := lselvec0.orR
  loadWbSel(1) := OHToUInt(lselvec1)
  loadWbSelV(1) := lselvec1.orR
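  // Two-way selection: lselvec0 is the one-hot of the first ready entry and
  // lselvec1 the one-hot of the next ready entry after masking the first off,
  // so the two writeback ports never pick the same entry in a cycle.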
  (0 until StorePipelineWidth).foreach(i => {
    // data select
    val rdata = dataModule.io.rdata(loadWbSel(i)).data
    val func = uop(loadWbSel(i)).ctrl.fuOpType
    val raddr = dataModule.io.rdata(loadWbSel(i)).paddr
    val rdataSel = LookupTree(raddr(2, 0), List(
      "b000".U -> rdata(63, 0),
      "b001".U -> rdata(63, 8),
      "b010".U -> rdata(63, 16),
      "b011".U -> rdata(63, 24),
      "b100".U -> rdata(63, 32),
      "b101".U -> rdata(63, 40),
      "b110".U -> rdata(63, 48),
      "b111".U -> rdata(63, 56)
    ))
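    // The low address bits select a byte shift so the addressed bytes land at
    // bit 0. Illustrative example: for a lh with paddr(2, 0) = 2, rdata(63, 16)
    // places the addressed halfword in bits (15, 0) before rdataHelper
    // sign- or zero-extends it.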
    val rdataPartialLoad = rdataHelper(uop(loadWbSel(i)), rdataSel)
    io.ldout(i).bits.uop := uop(loadWbSel(i))
    io.ldout(i).bits.uop.cf.exceptionVec := dataModule.io.rdata(loadWbSel(i)).exception.asBools
    io.ldout(i).bits.uop.lqIdx := loadWbSel(i).asTypeOf(new LqPtr)
    io.ldout(i).bits.data := rdataPartialLoad
    io.ldout(i).bits.redirectValid := false.B
    io.ldout(i).bits.redirect := DontCare
    io.ldout(i).bits.brUpdate := DontCare
    io.ldout(i).bits.debug.isMMIO := dataModule.io.rdata(loadWbSel(i)).mmio
    io.ldout(i).bits.fflags := DontCare
    io.ldout(i).valid := loadWbSelVec(loadWbSel(i)) && loadWbSelV(i)
    when(io.ldout(i).fire()) {
      writebacked(loadWbSel(i)) := true.B
      XSInfo("load miss write to cdb roqidx %d lqidx %d pc 0x%x paddr %x data %x mmio %x\n",
        io.ldout(i).bits.uop.roqIdx.asUInt,
        io.ldout(i).bits.uop.lqIdx.asUInt,
        io.ldout(i).bits.uop.cf.pc,
        dataModule.io.rdata(loadWbSel(i)).paddr,
        dataModule.io.rdata(loadWbSel(i)).data,
        dataModule.io.rdata(loadWbSel(i)).mmio
      )
    }
  })

  // move tailPtr
  // allocatedMask marks entries that are still allocated (or lie outside the
  // valid enq/deq range); deqPtr may only advance to the first such entry
  val allocatedMask = VecInit((0 until LoadQueueSize).map(i => allocated(i) || !enqDeqMask(i)))
  // find the first such entry starting from deqPtr
  val nextTail1 = getFirstOneWithFlag(allocatedMask, deqMask, deqPtrExt.flag)
  val nextTail = Mux(Cat(allocatedMask).orR, nextTail1, enqPtrExt)
  deqPtrExt := nextTail

  // When a load is committed, mark it as !allocated; the entry will be recycled later
  (0 until CommitWidth).foreach(i => {
    when(loadCommit(i)) {
      allocated(mcommitIdx(i)) := false.B
      XSDebug("load commit %d: idx %d %x\n", i.U, mcommitIdx(i), uop(mcommitIdx(i)).cf.pc)
    }
  })

  def getFirstOne(mask: Vec[Bool], startMask: UInt) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    PriorityEncoder(Mux(highBitsUint.orR(), highBitsUint, mask.asUInt))
  }
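  // Illustrative example: with mask = 0b0110 and startMask = deqMask = 0b0011
  // (deqPtr = 2), highBits keeps only bits at or above deqPtr, i.e. 0b0100,
  // so entry 2 is chosen; if nothing is set at or above deqPtr, the search
  // wraps around to the lowest set bit of mask.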

  def getFirstOneWithFlag(mask: Vec[Bool], startMask: UInt, startFlag: Bool) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    val changeDirection = !highBitsUint.orR()
    val index = PriorityEncoder(Mux(!changeDirection, highBitsUint, mask.asUInt))
    LqPtr(startFlag ^ changeDirection, index)
  }
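  // Same search as getFirstOne, but it also returns the queue flag: when the
  // search wraps past the end of the queue (changeDirection), the returned
  // LqPtr's flag is flipped relative to startFlag.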

  // returns the older (program-order earlier, smaller roqIdx) of two uops;
  // if only one is valid, returns that one
  def getOldestInTwo(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    assert(valid.length == 2)
    Mux(valid(0) && valid(1),
      Mux(isAfter(uop(0).roqIdx, uop(1).roqIdx), uop(1), uop(0)),
      Mux(valid(0) && !valid(1), uop(0), uop(1)))
  }

  // mask(i)(j) is true when uop(i) is after (younger than) uop(j); an invalid
  // entry i is treated as younger than everything, so it loses the
  // oldest-first selection below
  def getAfterMask(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    val length = valid.length
    (0 until length).map(i => {
      (0 until length).map(j => {
        Mux(valid(i) && valid(j),
          isAfter(uop(i).roqIdx, uop(j).roqIdx),
          Mux(!valid(i), true.B, false.B))
      })
    })
  }

  def rangeMask(start: LqPtr, end: LqPtr): UInt = {
    val startMask = (1.U((LoadQueueSize + 1).W) << start.value).asUInt - 1.U
    val endMask = (1.U((LoadQueueSize + 1).W) << end.value).asUInt - 1.U
    val xorMask = startMask(LoadQueueSize - 1, 0) ^ endMask(LoadQueueSize - 1, 0)
    Mux(start.flag === end.flag, xorMask, ~xorMask)
  }

  // ignore data forward
  (0 until LoadPipelineWidth).foreach(i => {
    io.forward(i).forwardMask := DontCare
    io.forward(i).forwardData := DontCare
  })

  // store backward query and rollback
  def detectRollback(i: Int) = {
    val startIndex = io.storeIn(i).bits.uop.lqIdx.value
    val lqIdxMask = UIntToMask(startIndex, LoadQueueSize)
    val xorMask = lqIdxMask ^ enqMask
    val sameFlag = io.storeIn(i).bits.uop.lqIdx.flag === enqPtrExt.flag
    val toEnqPtrMask = Mux(sameFlag, xorMask, ~xorMask)

    // check whether a load already in the lq needs to be rolled back
    val lqViolationVec = RegNext(VecInit((0 until LoadQueueSize).map(j => {
      val addrMatch = allocated(j) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === dataModule.io.rdata(j).paddr(PAddrBits - 1, 3)
      val entryNeedCheck = toEnqPtrMask(j) && addrMatch && (datavalid(j) || listening(j) || miss(j))
      // TODO: update refilled data
      val violationVec = (0 until 8).map(k => dataModule.io.rdata(j).mask(k) && io.storeIn(i).bits.mask(k))
      Cat(violationVec).orR() && entryNeedCheck
    })))
    val lqViolation = lqViolationVec.asUInt().orR()
    val lqViolationIndex = getFirstOne(lqViolationVec, RegNext(lqIdxMask))
    val lqViolationUop = uop(lqViolationIndex)
    // lqViolationUop.lqIdx.flag := deqMask(lqViolationIndex) ^ deqPtrExt.flag
    // lqViolationUop.lqIdx.value := lqViolationIndex
    XSDebug(lqViolation, p"${Binary(Cat(lqViolationVec))}, $startIndex, $lqViolationIndex\n")

    // when a load and a store write back to the roq in the same cycle, check whether rollback is needed
    val wbViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.loadIn(j).valid &&
        isAfter(io.loadIn(j).bits.uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.loadIn(j).bits.paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.loadIn(j).bits.mask).orR
    })))
    val wbViolation = wbViolationVec.asUInt().orR()
    val wbViolationUop = getOldestInTwo(wbViolationVec, RegNext(VecInit(io.loadIn.map(_.bits.uop))))
    XSDebug(wbViolation, p"${Binary(Cat(wbViolationVec))}, $wbViolationUop\n")

    // check whether rollback is needed for a load still in the L1 load pipeline
    val l1ViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.forward(j).valid && // L1 valid
        isAfter(io.forward(j).uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.forward(j).paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.forward(j).mask).orR
    })))
    val l1Violation = l1ViolationVec.asUInt().orR()
    val l1ViolationUop = getOldestInTwo(l1ViolationVec, RegNext(VecInit(io.forward.map(_.uop))))
    XSDebug(l1Violation, p"${Binary(Cat(l1ViolationVec))}, $l1ViolationUop\n")

    val rollbackValidVec = Seq(lqViolation, wbViolation, l1Violation)
    val rollbackUopVec = Seq(lqViolationUop, wbViolationUop, l1ViolationUop)

    val mask = getAfterMask(rollbackValidVec, rollbackUopVec)
    val oneAfterZero = mask(1)(0)
    val rollbackUop = Mux(oneAfterZero && mask(2)(0),
      rollbackUopVec(0),
      Mux(!oneAfterZero && mask(2)(1), rollbackUopVec(1), rollbackUopVec(2)))
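    // This picks the oldest valid candidate: uop(0) wins when both others are
    // after it; otherwise uop(1) wins when it is before uop(0) and uop(2);
    // otherwise uop(2). Rolling back to the oldest violating load suffices,
    // since the replay re-executes everything younger than it.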

    XSDebug(
      l1Violation,
      "need rollback (l1 load) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, l1ViolationUop.roqIdx.asUInt
    )
    XSDebug(
      lqViolation,
      "need rollback (ld wb before store) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, lqViolationUop.roqIdx.asUInt
    )
    XSDebug(
      wbViolation,
      "need rollback (ld/st wb together) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, wbViolationUop.roqIdx.asUInt
    )

    (RegNext(io.storeIn(i).valid) && Cat(rollbackValidVec).orR, rollbackUop)
  }

  // rollback check
  val rollback = Wire(Vec(StorePipelineWidth, Valid(new MicroOp)))
  for (i <- 0 until StorePipelineWidth) {
    val detectedRollback = detectRollback(i)
    rollback(i).valid := detectedRollback._1
    rollback(i).bits := detectedRollback._2
  }

  def rollbackSel(a: Valid[MicroOp], b: Valid[MicroOp]): ValidIO[MicroOp] = {
    Mux(
      a.valid,
      Mux(
        b.valid,
        Mux(isAfter(a.bits.roqIdx, b.bits.roqIdx), b, a), // a, b both valid, select the older one
        a // select a
      ),
      b // select b
    )
  }
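  // ParallelOperation reduces the per-store-pipeline rollback requests
  // pairwise with rollbackSel, leaving the single oldest valid request.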

  val rollbackSelected = ParallelOperation(rollback, rollbackSel)
  val lastCycleRedirect = RegNext(io.brqRedirect)

  io.rollback := DontCare
  // Note that we use roqIdx - 1.U to flush the load instruction itself.
  // Thus, if last cycle's roqIdx equals this cycle's roqIdx, it still triggers the redirect.
  io.rollback.valid := rollbackSelected.valid && (!lastCycleRedirect.valid || !isAfter(rollbackSelected.bits.roqIdx, lastCycleRedirect.bits.roqIdx))

  io.rollback.bits.roqIdx := rollbackSelected.bits.roqIdx - 1.U
  io.rollback.bits.isReplay := true.B
  io.rollback.bits.isMisPred := false.B
  io.rollback.bits.isException := false.B
  io.rollback.bits.isFlushPipe := false.B
  io.rollback.bits.target := rollbackSelected.bits.cf.pc
  io.rollback.bits.brTag := rollbackSelected.bits.brTag

  // Memory mapped IO / other uncached operations

  // set up the mmio/uncache memory access request
  // mask / paddr / data can be read from lq.data
  val commitType = io.commits.uop(0).ctrl.commitType
  io.uncache.req.valid := pending(deqPtr) && allocated(deqPtr) &&
    commitType === CommitType.LOAD &&
    io.roqDeqPtr === uop(deqPtr).roqIdx &&
    !io.commits.isWalk
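  // The request can only fire once the mmio load has reached the head of the
  // roq (the io.roqDeqPtr match above), keeping the access non-speculative.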

  io.uncache.req.bits.cmd  := MemoryOpConstants.M_XRD
  io.uncache.req.bits.addr := dataModule.io.rdata(deqPtr).paddr
  io.uncache.req.bits.data := dataModule.io.rdata(deqPtr).data
  io.uncache.req.bits.mask := dataModule.io.rdata(deqPtr).mask

  io.uncache.req.bits.meta.id       := DontCare // TODO: // FIXME
  io.uncache.req.bits.meta.vaddr    := DontCare
  io.uncache.req.bits.meta.paddr    := dataModule.io.rdata(deqPtr).paddr
  io.uncache.req.bits.meta.uop      := uop(deqPtr)
  io.uncache.req.bits.meta.mmio     := true.B // dataModule.io.rdata(deqPtr).mmio
  io.uncache.req.bits.meta.tlb_miss := false.B
  io.uncache.req.bits.meta.mask     := dataModule.io.rdata(deqPtr).mask
  io.uncache.req.bits.meta.replay   := false.B

  io.uncache.resp.ready := true.B

  when (io.uncache.req.fire()) {
    pending(deqPtr) := false.B
  }

  dataModule.io.uncache.wen := false.B
  when(io.uncache.resp.fire()){
    datavalid(deqPtr) := true.B
    dataModule.io.uncacheWrite(deqPtr, io.uncache.resp.bits.data(XLEN - 1, 0))
    dataModule.io.uncache.wen := true.B
    // TODO: write back exception info
  }

  when(io.uncache.req.fire()){
    XSDebug("uncache req: pc %x addr %x data %x op %x mask %x\n",
      uop(deqPtr).cf.pc,
      io.uncache.req.bits.addr,
      io.uncache.req.bits.data,
      io.uncache.req.bits.cmd,
      io.uncache.req.bits.mask
    )
  }

  when(io.uncache.resp.fire()){
    XSDebug("uncache resp: data %x\n", io.uncache.resp.bits.data)
  }

  // Read vaddr for mem exception
  io.exceptionAddr.vaddr := dataModule.io.rdata(io.exceptionAddr.lsIdx.lqIdx.value).vaddr

  // misprediction recovery / exception redirect
  // invalidate lq entries using roqIdx
  val needCancel = Wire(Vec(LoadQueueSize, Bool()))
  for (i <- 0 until LoadQueueSize) {
    needCancel(i) := uop(i).roqIdx.needFlush(io.brqRedirect) && allocated(i) && !commited(i)
    when(needCancel(i)) {
      // when(io.brqRedirect.bits.isReplay){
      //   valid(i) := false.B
      //   writebacked(i) := false.B
      //   listening(i) := false.B
      //   miss(i) := false.B
      //   pending(i) := false.B
      // }.otherwise{
        allocated(i) := false.B
      // }
    }
  }
  when (io.brqRedirect.valid && io.brqRedirect.bits.isMisPred) {
    enqPtrExt := enqPtrExt - PopCount(needCancel)
  }

  // assert(!io.rollback.valid)
  when(io.rollback.valid) {
    XSDebug("Mem rollback: pc %x roqidx %d\n", io.rollback.bits.pc, io.rollback.bits.roqIdx.asUInt)
  }

  // debug info
  XSDebug("head %d:%d tail %d:%d\n", enqPtrExt.flag, enqPtr, deqPtrExt.flag, deqPtr)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when(flag) {
      XSDebug(false, true.B, name)
    }.otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  for (i <- 0 until LoadQueueSize) {
    if (i % 4 == 0) XSDebug("")
    XSDebug(false, true.B, "%x [%x] ", uop(i).cf.pc, dataModule.io.rdata(i).paddr)
    PrintFlag(allocated(i), "a")
    PrintFlag(allocated(i) && datavalid(i), "v")
    PrintFlag(allocated(i) && writebacked(i), "w")
    PrintFlag(allocated(i) && commited(i), "c")
    PrintFlag(allocated(i) && miss(i), "m")
    PrintFlag(allocated(i) && listening(i), "l")
    PrintFlag(allocated(i) && pending(i), "p")
    XSDebug(false, true.B, " ")
    if (i % 4 == 3 || i == LoadQueueSize - 1) XSDebug(false, true.B, "\n")
  }

}
602