xref: /XiangShan/src/main/scala/xiangshan/mem/lsqueue/LoadQueue.scala (revision a1fd7de4103f2448006f7bd974fd59cb9c6e7c7b)
package xiangshan.mem

import chisel3._
import chisel3.util._
import freechips.rocketchip.tile.HasFPUParameters
import utils._
import xiangshan._
import xiangshan.cache._
import xiangshan.backend.LSUOpType
import xiangshan.backend.roq.RoqPtr


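// A LqPtr indexes the circular LoadQueue: `value` selects the entry and
// `flag` flips on each wrap-around, so enqueue/dequeue pointers with equal
// values can still distinguish a full queue from an empty one.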
class LqPtr extends CircularQueuePtr(LqPtr.LoadQueueSize) { }

object LqPtr extends HasXSParameter {
  def apply(f: Bool, v: UInt): LqPtr = {
    val ptr = Wire(new LqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}

trait HasLoadHelper { this: XSModule =>
  def rdataHelper(uop: MicroOp, rdata: UInt): UInt = {
    val lwIntData = SignExt(rdata(31, 0), XLEN)
    val ldIntData = SignExt(rdata(63, 0), XLEN)
    val lwFpData = recode(rdata(31, 0), S)
    val ldFpData = recode(rdata(63, 0), D)
    val fpWen = uop.ctrl.fpWen
    LookupTree(uop.ctrl.fuOpType, List(
      LSUOpType.lb   -> SignExt(rdata(7, 0) , XLEN),
      LSUOpType.lh   -> SignExt(rdata(15, 0), XLEN),
      LSUOpType.lw   -> Mux(fpWen, lwFpData, lwIntData),
      LSUOpType.ld   -> Mux(fpWen, ldFpData, ldIntData),
      LSUOpType.lbu  -> ZeroExt(rdata(7, 0) , XLEN),
      LSUOpType.lhu  -> ZeroExt(rdata(15, 0), XLEN),
      LSUOpType.lwu  -> ZeroExt(rdata(31, 0), XLEN)
    ))
  }
}
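// Worked example (illustrative): for lb with rdata byte 0x80, rdataHelper
// sign-extends to 0xffff_ffff_ffff_ff80, while lbu zero-extends to 0x80.
// FP results (lw/ld with fpWen set) are recoded into the hardfloat internal
// format expected by the FP regfile.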
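// Enqueue interface from dispatch: an enqueue only takes effect when both
// the load queue (canAccept) and the store queue (sqCanAccept) have room.
// needAlloc marks which of the RenameWidth slots need a load queue entry;
// resp returns the LqPtr allocated to each slot.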
class LqEnqIO extends XSBundle {
  val canAccept = Output(Bool())
  val sqCanAccept = Input(Bool())
  val needAlloc = Vec(RenameWidth, Input(Bool()))
  val req = Vec(RenameWidth, Flipped(ValidIO(new MicroOp)))
  val resp = Vec(RenameWidth, Output(new LqPtr))
}

// Load Queue
class LoadQueue extends XSModule
  with HasDCacheParameters
  with HasCircularQueuePtrHelper
  with HasLoadHelper
{
  val io = IO(new Bundle() {
    val enq = new LqEnqIO
    val brqRedirect = Input(Valid(new Redirect))
    val loadIn = Vec(LoadPipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle))) // FIXME: Valid() only
    val ldout = Vec(2, DecoupledIO(new ExuOutput)) // writeback load
    val load_s1 = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO))
    val commits = Flipped(new RoqCommitIO)
    val rollback = Output(Valid(new Redirect)) // replay now starts from load instead of store
    val dcache = new DCacheLineIO
    val uncache = new DCacheWordIO
    val roqDeqPtr = Input(new RoqPtr)
    val exceptionAddr = new ExceptionAddrIO
  })

  val uop = Reg(Vec(LoadQueueSize, new MicroOp))
  // val data = Reg(Vec(LoadQueueSize, new LsRoqEntry))
  val dataModule = Module(new LSQueueData(LoadQueueSize, LoadPipelineWidth))
  dataModule.io := DontCare
  val allocated = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // lq entry has been allocated
  val datavalid = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // data is valid
  val writebacked = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // inst has been written back to CDB
  val commited = Reg(Vec(LoadQueueSize, Bool())) // inst has been committed by ROB
  val miss = Reg(Vec(LoadQueueSize, Bool())) // load inst missed, waiting for miss queue to accept miss request
  val listening = Reg(Vec(LoadQueueSize, Bool())) // waiting for refill result
  val pending = Reg(Vec(LoadQueueSize, Bool())) // mmio pending: inst is an mmio inst, it will not be executed until it reaches the end of roq
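  // Entry lifecycle: allocated at dispatch; a dcache hit sets datavalid and
  // writebacked together; a miss goes miss -> listening -> datavalid before
  // writing back; an mmio load stays pending until issued to uncache at the
  // ROB head. Commit finally clears allocated.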

  val enqPtrExt = RegInit(VecInit((0 until RenameWidth).map(_.U.asTypeOf(new LqPtr))))
  val deqPtrExt = RegInit(0.U.asTypeOf(new LqPtr))
  val validCounter = RegInit(0.U(log2Ceil(LoadQueueSize + 1).W))
  val allowEnqueue = RegInit(true.B)
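  // enqPtrExt holds RenameWidth consecutive enqueue pointers (ptr, ptr + 1,
  // ...), so each dispatch slot reads its own LqPtr directly instead of
  // recomputing enqPtr + offset combinationally every cycle.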

  val enqPtr = enqPtrExt(0).value
  val deqPtr = deqPtrExt.value
  val sameFlag = enqPtrExt(0).flag === deqPtrExt.flag
  val isEmpty = enqPtr === deqPtr && sameFlag
  val isFull = enqPtr === deqPtr && !sameFlag
  val allowIn = !isFull

  val loadCommit = (0 until CommitWidth).map(i => io.commits.valid(i) && !io.commits.isWalk && io.commits.info(i).commitType === CommitType.LOAD)
  val mcommitIdx = (0 until CommitWidth).map(i => io.commits.info(i).lqIdx.value)

  val deqMask = UIntToMask(deqPtr, LoadQueueSize)
  val enqMask = UIntToMask(enqPtr, LoadQueueSize)
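  // UIntToMask(ptr, size) sets the bits below ptr, e.g. UIntToMask(3, 8) =
  // b0000_0111; these masks let the selection logic below reason about
  // which entries lie between two pointers of the circular queue.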

  /**
    * Enqueue at dispatch
    *
    * Currently, LoadQueue only allows enqueue when #emptyEntries > RenameWidth (the enqueue width)
    */
  io.enq.canAccept := allowEnqueue

  for (i <- 0 until RenameWidth) {
    // slot i allocates the entry `offset` places past the enqueue pointer,
    // where offset counts how many earlier slots also allocate an entry
    val offset = if (i == 0) 0.U else PopCount(io.enq.needAlloc.take(i))
    val lqIdx = enqPtrExt(offset)
    val index = lqIdx.value
    when (io.enq.req(i).valid && io.enq.canAccept && io.enq.sqCanAccept && !io.brqRedirect.valid) {
      uop(index) := io.enq.req(i).bits
      allocated(index) := true.B
      datavalid(index) := false.B
      writebacked(index) := false.B
      commited(index) := false.B
      miss(index) := false.B
      listening(index) := false.B
      pending(index) := false.B
    }
    io.enq.resp(i) := lqIdx
  }
  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n")

  /**
    * Writeback load from load units
    *
    * Most load instructions write back to the regfile at the same time.
    * However,
    *   (1) an mmio instruction with exceptions writes back to the ROB immediately;
    *   (2) an mmio instruction without exceptions does not write back yet.
    * The mmio instruction is sent to the lower-level memory system once it reaches the ROB's head.
    * After the uncache response arrives, it writes back through the arbiter shared with loadUnit.
    *   (3) A cache-missing load is marked as miss and its request is sent to the dcache later.
    * After the cache line is refilled, it writes back through the arbiter shared with loadUnit.
    */
  for (i <- 0 until LoadPipelineWidth) {
    dataModule.io.wb(i).wen := false.B
    when(io.loadIn(i).fire()) {
      when(io.loadIn(i).bits.miss) {
        XSInfo(io.loadIn(i).valid, "load miss write to lq idx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x roll %x exc %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio,
          io.loadIn(i).bits.rollback,
          io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
        )
      }.otherwise {
        XSInfo(io.loadIn(i).valid, "load hit write to cdb lqidx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x roll %x exc %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio,
          io.loadIn(i).bits.rollback,
          io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
        )
      }
      val loadWbIndex = io.loadIn(i).bits.uop.lqIdx.value
      // a load that neither misses nor is mmio has its data and is done
      datavalid(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      writebacked(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio

      val loadWbData = Wire(new LsqEntry)
      loadWbData.paddr := io.loadIn(i).bits.paddr
      loadWbData.vaddr := io.loadIn(i).bits.vaddr
      loadWbData.mask := io.loadIn(i).bits.mask
      loadWbData.data := io.loadIn(i).bits.data // for mmio / misc / debug
      loadWbData.mmio := io.loadIn(i).bits.mmio
      loadWbData.fwdMask := io.loadIn(i).bits.forwardMask
      loadWbData.fwdData := io.loadIn(i).bits.forwardData
      loadWbData.exception := io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
      dataModule.io.wbWrite(i, loadWbIndex, loadWbData)
      dataModule.io.wb(i).wen := true.B

      val dcacheMissed = io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      miss(loadWbIndex) := dcacheMissed && !io.loadIn(i).bits.uop.cf.exceptionVec.asUInt.orR
      listening(loadWbIndex) := dcacheMissed
      pending(loadWbIndex) := io.loadIn(i).bits.mmio && !io.loadIn(i).bits.uop.cf.exceptionVec.asUInt.orR
    }
  }

  /**
    * Cache miss request
    *
    * (1) writeback: the load missed in dcache and its entry is marked miss
    * (2) send to dcache: the miss request is issued and the entry is marked listening
    * (3) dcache response: the refill arrives and the entry becomes datavalid
    * (4) writeback to ROB: the refilled load is written back and marked writebacked
    */
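  // inflightReqs tracks cache blocks that already have an outstanding miss
  // request, so a second miss to the same block does not issue a duplicate
  // dcache request; it simply keeps listening for the shared refill.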
  val inflightReqs = RegInit(VecInit(Seq.fill(cfg.nLoadMissEntries)(0.U.asTypeOf(new InflightBlockInfo))))
  val inflightReqFull = inflightReqs.map(req => req.valid).reduce(_&&_)
  val reqBlockIndex = PriorityEncoder(~VecInit(inflightReqs.map(req => req.valid)).asUInt)

  val missRefillSelVec = VecInit(
    (0 until LoadQueueSize).map{ i =>
      val inflight = inflightReqs.map(req => req.valid && req.block_addr === get_block_addr(dataModule.io.rdata(i).paddr)).reduce(_||_)
      allocated(i) && miss(i) && !inflight
    })

  val missRefillSel = getFirstOne(missRefillSelVec, deqMask)
  val missRefillBlockAddr = get_block_addr(dataModule.io.rdata(missRefillSel).paddr)
  io.dcache.req.valid := missRefillSelVec.asUInt.orR
  io.dcache.req.bits.cmd := MemoryOpConstants.M_XRD
  io.dcache.req.bits.addr := missRefillBlockAddr
  io.dcache.req.bits.data := DontCare
  io.dcache.req.bits.mask := DontCare

  io.dcache.req.bits.meta.id       := DontCare
  io.dcache.req.bits.meta.vaddr    := DontCare // dataModule.io.rdata(missRefillSel).vaddr
  io.dcache.req.bits.meta.paddr    := missRefillBlockAddr
  io.dcache.req.bits.meta.uop      := uop(missRefillSel)
  io.dcache.req.bits.meta.mmio     := false.B // dataModule.io.rdata(missRefillSel).mmio
  io.dcache.req.bits.meta.tlb_miss := false.B
  io.dcache.req.bits.meta.mask     := DontCare
  io.dcache.req.bits.meta.replay   := false.B

  io.dcache.resp.ready := true.B

  assert(!(dataModule.io.rdata(missRefillSel).mmio && io.dcache.req.valid))

  when(io.dcache.req.fire()) {
    miss(missRefillSel) := false.B
    listening(missRefillSel) := true.B

    // mark this block as inflight
    inflightReqs(reqBlockIndex).valid := true.B
    inflightReqs(reqBlockIndex).block_addr := missRefillBlockAddr
    // the chosen slot must have been free (the assert samples the register
    // value before this cycle's update takes effect)
    assert(!inflightReqs(reqBlockIndex).valid)
  }

  when(io.dcache.resp.fire()) {
    val inflight = inflightReqs.map(req => req.valid && req.block_addr === get_block_addr(io.dcache.resp.bits.meta.paddr)).reduce(_||_)
    assert(inflight)
    for (i <- 0 until cfg.nLoadMissEntries) {
      when (inflightReqs(i).valid && inflightReqs(i).block_addr === get_block_addr(io.dcache.resp.bits.meta.paddr)) {
        inflightReqs(i).valid := false.B
      }
    }
  }

  when(io.dcache.req.fire()){
    XSDebug("miss req: pc:0x%x roqIdx:%d lqIdx:%d (p)addr:0x%x vaddr:0x%x\n",
      io.dcache.req.bits.meta.uop.cf.pc, io.dcache.req.bits.meta.uop.roqIdx.asUInt, io.dcache.req.bits.meta.uop.lqIdx.asUInt,
      io.dcache.req.bits.addr, io.dcache.req.bits.meta.vaddr
    )
  }

  when(io.dcache.resp.fire()){
    XSDebug("miss resp: pc:0x%x roqIdx:%d lqIdx:%d (p)addr:0x%x data %x\n",
      io.dcache.resp.bits.meta.uop.cf.pc, io.dcache.resp.bits.meta.uop.roqIdx.asUInt, io.dcache.resp.bits.meta.uop.lqIdx.asUInt,
      io.dcache.resp.bits.meta.paddr, io.dcache.resp.bits.data
    )
  }

  // Refill 64 bit in a cycle
  // Refill data comes back from io.dcache.resp
  dataModule.io.refill.dcache := io.dcache.resp.bits

  (0 until LoadQueueSize).map(i => {
    val blockMatch = get_block_addr(dataModule.io.rdata(i).paddr) === io.dcache.resp.bits.meta.paddr
    dataModule.io.refill.wen(i) := false.B
    when(allocated(i) && listening(i) && blockMatch && io.dcache.resp.fire()) {
      dataModule.io.refill.wen(i) := true.B
      datavalid(i) := true.B
      listening(i) := false.B
    }
  })
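  // A single refill can wake several entries at once: every allocated,
  // listening entry whose address falls in the refilled block captures the
  // data in the same cycle.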

  // writeback up to 2 missed load insts to CDB
  // pick up to 2 missed loads whose data has been refilled and write them back to the CDB
  val loadWbSelVec = VecInit((0 until LoadQueueSize).map(i => {
    allocated(i) && datavalid(i) && !writebacked(i)
  })).asUInt() // use UInt instead of Vec to reduce Verilog lines
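  // lselvec0 one-hots the lowest-indexed candidate; masking it out of
  // loadWbSelVec before the second PriorityEncoderOH guarantees the two
  // writeback ports never pick the same entry.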
  val loadWbSel = Wire(Vec(StorePipelineWidth, UInt(log2Up(LoadQueueSize).W)))
  val loadWbSelV = Wire(Vec(StorePipelineWidth, Bool()))
  val lselvec0 = PriorityEncoderOH(loadWbSelVec)
  val lselvec1 = PriorityEncoderOH(loadWbSelVec & (~lselvec0).asUInt)
  loadWbSel(0) := OHToUInt(lselvec0)
  loadWbSelV(0) := lselvec0.orR
  loadWbSel(1) := OHToUInt(lselvec1)
  loadWbSelV(1) := lselvec1.orR
  (0 until StorePipelineWidth).map(i => {
    // data select: shift the addressed bytes down to bit 0 using the low
    // three bits of the physical address before width-specific extension
    val rdata = dataModule.io.rdata(loadWbSel(i)).data
    val func = uop(loadWbSel(i)).ctrl.fuOpType
    val raddr = dataModule.io.rdata(loadWbSel(i)).paddr
    val rdataSel = LookupTree(raddr(2, 0), List(
      "b000".U -> rdata(63, 0),
      "b001".U -> rdata(63, 8),
      "b010".U -> rdata(63, 16),
      "b011".U -> rdata(63, 24),
      "b100".U -> rdata(63, 32),
      "b101".U -> rdata(63, 40),
      "b110".U -> rdata(63, 48),
      "b111".U -> rdata(63, 56)
    ))
    val rdataPartialLoad = rdataHelper(uop(loadWbSel(i)), rdataSel)
    io.ldout(i).bits.uop := uop(loadWbSel(i))
    io.ldout(i).bits.uop.cf.exceptionVec := dataModule.io.rdata(loadWbSel(i)).exception.asBools
    io.ldout(i).bits.uop.lqIdx := loadWbSel(i).asTypeOf(new LqPtr)
    io.ldout(i).bits.data := rdataPartialLoad
    io.ldout(i).bits.redirectValid := false.B
    io.ldout(i).bits.redirect := DontCare
    io.ldout(i).bits.brUpdate := DontCare
    io.ldout(i).bits.debug.isMMIO := dataModule.io.rdata(loadWbSel(i)).mmio
    io.ldout(i).bits.fflags := DontCare
    io.ldout(i).valid := loadWbSelVec(loadWbSel(i)) && loadWbSelV(i)
    when(io.ldout(i).fire()) {
      writebacked(loadWbSel(i)) := true.B
      XSInfo("load miss write to cdb roqidx %d lqidx %d pc 0x%x paddr %x data %x mmio %x\n",
        io.ldout(i).bits.uop.roqIdx.asUInt,
        io.ldout(i).bits.uop.lqIdx.asUInt,
        io.ldout(i).bits.uop.cf.pc,
        dataModule.io.rdata(loadWbSel(i)).paddr,
        dataModule.io.rdata(loadWbSel(i)).data,
        dataModule.io.rdata(loadWbSel(i)).mmio
      )
    }
  })
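  // Worked example (illustrative): an lh whose paddr ends in b110 selects
  // rdata(63, 48), and rdataHelper then sign-extends bits (15, 0) of that
  // slice, i.e. exactly the two addressed bytes.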

  /**
    * Load commits
    *
    * When a load commits, mark it as !allocated and move deqPtrExt forward.
    */
  (0 until CommitWidth).map(i => {
    when(loadCommit(i)) {
      allocated(mcommitIdx(i)) := false.B
      XSDebug("load commit %d: idx %d %x\n", i.U, mcommitIdx(i), uop(mcommitIdx(i)).cf.pc)
    }
  })

  def getFirstOne(mask: Vec[Bool], startMask: UInt) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    PriorityEncoder(Mux(highBitsUint.orR(), highBitsUint, mask.asUInt))
  }
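  // Worked example for getFirstOne (illustrative): with mask = b0110 and
  // startMask = UIntToMask(2, 4) = b0011, highBits = b0100, so entry 2 is
  // picked; if no bit at or above the start position is set, the encoder
  // wraps around and picks the first set bit from index 0.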

  def getOldestInTwo(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    assert(valid.length == 2)
    Mux(valid(0) && valid(1),
      Mux(isAfter(uop(0).roqIdx, uop(1).roqIdx), uop(1), uop(0)),
      Mux(valid(0) && !valid(1), uop(0), uop(1)))
  }

  def getAfterMask(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    val length = valid.length
    (0 until length).map(i => {
      (0 until length).map(j => {
        Mux(valid(i) && valid(j),
          isAfter(uop(i).roqIdx, uop(j).roqIdx),
          Mux(!valid(i), true.B, false.B))
      })
    })
  }
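  // getAfterMask(i)(j) is true when candidate i is program-order-after
  // candidate j (an invalid candidate compares as "after" everything), so
  // the rollback selection below always targets the oldest violating load.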

  /**
    * Memory violation detection
    *
    * When a store writes back, it searches the LoadQueue for younger load instructions
    * with the same load physical address. They loaded wrong data and need re-execution.
    *
    * Cycle 0: Store Writeback
    *   Generate a match vector for the store address with rangeMask(stPtr, enqPtr).
    *   Besides, load instructions in LoadUnit_S1 and S2 are also checked.
    * Cycle 1: Redirect Generation
    *   There are three possible types of violations. Choose the oldest load.
    *   Set io.redirect according to the detected violation.
    */
  io.load_s1 := DontCare
  def detectRollback(i: Int) = {
    val startIndex = io.storeIn(i).bits.uop.lqIdx.value
    val lqIdxMask = UIntToMask(startIndex, LoadQueueSize)
    val xorMask = lqIdxMask ^ enqMask
    val sameFlag = io.storeIn(i).bits.uop.lqIdx.flag === enqPtrExt(0).flag
    val toEnqPtrMask = Mux(sameFlag, xorMask, ~xorMask)
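    // Example (illustrative): with LoadQueueSize = 8, startIndex = 2 and
    // enqPtr = 5 on the same flag, lqIdxMask = b0000_0011 and enqMask =
    // b0001_1111, so toEnqPtrMask = b0001_1100 marks entries 2..4: exactly
    // the loads at or after the store's lqIdx that are already in the queue.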

    // check if a load already in the lq needs to be rolled back
    val lqViolationVec = RegNext(VecInit((0 until LoadQueueSize).map(j => {
      val addrMatch = allocated(j) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === dataModule.io.rdata(j).paddr(PAddrBits - 1, 3)
      val entryNeedCheck = toEnqPtrMask(j) && addrMatch && (datavalid(j) || listening(j) || miss(j))
      // TODO: update refilled data
      val violationVec = (0 until 8).map(k => dataModule.io.rdata(j).mask(k) && io.storeIn(i).bits.mask(k))
      Cat(violationVec).orR() && entryNeedCheck
    })))
    val lqViolation = lqViolationVec.asUInt().orR()
    val lqViolationIndex = getFirstOne(lqViolationVec, RegNext(lqIdxMask))
    val lqViolationUop = uop(lqViolationIndex)
    // lqViolationUop.lqIdx.flag := deqMask(lqViolationIndex) ^ deqPtrExt.flag
    // lqViolationUop.lqIdx.value := lqViolationIndex
    XSDebug(lqViolation, p"${Binary(Cat(lqViolationVec))}, $startIndex, $lqViolationIndex\n")

    // when a load and a store write back to the roq together, check if rollback is needed
    val wbViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.loadIn(j).valid &&
        isAfter(io.loadIn(j).bits.uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.loadIn(j).bits.paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.loadIn(j).bits.mask).orR
    })))
    val wbViolation = wbViolationVec.asUInt().orR()
    val wbViolationUop = getOldestInTwo(wbViolationVec, RegNext(VecInit(io.loadIn.map(_.bits.uop))))
    XSDebug(wbViolation, p"${Binary(Cat(wbViolationVec))}, $wbViolationUop\n")

    // check if rollback is needed for a load still in LoadUnit stage 1
    val l1ViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.load_s1(j).valid && // L1 valid
        isAfter(io.load_s1(j).uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.load_s1(j).paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.load_s1(j).mask).orR
    })))
    val l1Violation = l1ViolationVec.asUInt().orR()
    val l1ViolationUop = getOldestInTwo(l1ViolationVec, RegNext(VecInit(io.load_s1.map(_.uop))))
    XSDebug(l1Violation, p"${Binary(Cat(l1ViolationVec))}, $l1ViolationUop\n")

    val rollbackValidVec = Seq(lqViolation, wbViolation, l1Violation)
    val rollbackUopVec = Seq(lqViolationUop, wbViolationUop, l1ViolationUop)

    val mask = getAfterMask(rollbackValidVec, rollbackUopVec)
    val oneAfterZero = mask(1)(0)
    val rollbackUop = Mux(oneAfterZero && mask(2)(0),
      rollbackUopVec(0),
      Mux(!oneAfterZero && mask(2)(1), rollbackUopVec(1), rollbackUopVec(2)))

    XSDebug(
      l1Violation,
      "need rollback (l1 load) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, l1ViolationUop.roqIdx.asUInt
    )
    XSDebug(
      lqViolation,
      "need rollback (ld wb before store) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, lqViolationUop.roqIdx.asUInt
    )
    XSDebug(
      wbViolation,
      "need rollback (ld/st wb together) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, wbViolationUop.roqIdx.asUInt
    )

    (RegNext(io.storeIn(i).valid) && Cat(rollbackValidVec).orR, rollbackUop)
  }

  // rollback check
  val rollback = Wire(Vec(StorePipelineWidth, Valid(new MicroOp)))
  for (i <- 0 until StorePipelineWidth) {
    val detectedRollback = detectRollback(i)
    rollback(i).valid := detectedRollback._1
    rollback(i).bits := detectedRollback._2
  }

  def rollbackSel(a: Valid[MicroOp], b: Valid[MicroOp]): ValidIO[MicroOp] = {
    Mux(
      a.valid,
      Mux(
        b.valid,
        Mux(isAfter(a.bits.roqIdx, b.bits.roqIdx), b, a), // a,b both valid, sel oldest
        a // sel a
      ),
      b // sel b
    )
  }
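  // ParallelOperation reduces the per-store-port rollback requests with
  // rollbackSel in a tree, leaving the oldest valid request: the one every
  // other violating load is program-order-after.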

  val rollbackSelected = ParallelOperation(rollback, rollbackSel)
  val lastCycleRedirect = RegNext(io.brqRedirect)

  // Note that we use roqIdx - 1.U to flush the load instruction itself.
  // Thus, here if last cycle's roqIdx equals this cycle's roqIdx, it still triggers the redirect.
  io.rollback.valid := rollbackSelected.valid &&
    (!lastCycleRedirect.valid || !isAfter(rollbackSelected.bits.roqIdx, lastCycleRedirect.bits.roqIdx)) &&
    !(lastCycleRedirect.valid && lastCycleRedirect.bits.isUnconditional())

  io.rollback.bits.roqIdx := rollbackSelected.bits.roqIdx
  io.rollback.bits.level := RedirectLevel.flush
  io.rollback.bits.interrupt := DontCare
  io.rollback.bits.pc := DontCare
  io.rollback.bits.target := rollbackSelected.bits.cf.pc
  io.rollback.bits.brTag := rollbackSelected.bits.brTag

  when(io.rollback.valid) {
    XSDebug("Mem rollback: pc %x roqidx %d\n", io.rollback.bits.pc, io.rollback.bits.roqIdx.asUInt)
  }

  /**
    * Memory mapped IO / other uncached operations
    *
    * An mmio load is issued to the uncache channel only when it reaches both
    * the head of the load queue and the head of the ROB (and commit is not
    * walking), so it executes exactly once and only when certain to commit.
    */
  io.uncache.req.valid := pending(deqPtr) && allocated(deqPtr) &&
    io.commits.info(0).commitType === CommitType.LOAD &&
    io.roqDeqPtr === uop(deqPtr).roqIdx &&
    !io.commits.isWalk

  io.uncache.req.bits.cmd  := MemoryOpConstants.M_XRD
  io.uncache.req.bits.addr := dataModule.io.rdata(deqPtr).paddr
  io.uncache.req.bits.data := dataModule.io.rdata(deqPtr).data
  io.uncache.req.bits.mask := dataModule.io.rdata(deqPtr).mask

  io.uncache.req.bits.meta.id       := DontCare
  io.uncache.req.bits.meta.vaddr    := DontCare
  io.uncache.req.bits.meta.paddr    := dataModule.io.rdata(deqPtr).paddr
  io.uncache.req.bits.meta.uop      := uop(deqPtr)
  io.uncache.req.bits.meta.mmio     := true.B
  io.uncache.req.bits.meta.tlb_miss := false.B
  io.uncache.req.bits.meta.mask     := dataModule.io.rdata(deqPtr).mask
  io.uncache.req.bits.meta.replay   := false.B

  io.uncache.resp.ready := true.B

  when (io.uncache.req.fire()) {
    pending(deqPtr) := false.B

    XSDebug("uncache req: pc %x addr %x data %x op %x mask %x\n",
      uop(deqPtr).cf.pc,
      io.uncache.req.bits.addr,
      io.uncache.req.bits.data,
      io.uncache.req.bits.cmd,
      io.uncache.req.bits.mask
    )
  }

  dataModule.io.uncache.wen := false.B
  when(io.uncache.resp.fire()){
    datavalid(deqPtr) := true.B
    dataModule.io.uncacheWrite(deqPtr, io.uncache.resp.bits.data(XLEN-1, 0))
    dataModule.io.uncache.wen := true.B

    XSDebug("uncache resp: data %x\n", io.uncache.resp.bits.data)
  }

  // Read vaddr for mem exception
  io.exceptionAddr.vaddr := dataModule.io.rdata(io.exceptionAddr.lsIdx.lqIdx.value).vaddr

  // misprediction recovery / exception redirect
  // invalidate lq entries using roqIdx
  val needCancel = Wire(Vec(LoadQueueSize, Bool()))
  for (i <- 0 until LoadQueueSize) {
    needCancel(i) := uop(i).roqIdx.needFlush(io.brqRedirect) && allocated(i) && !commited(i)
    when (needCancel(i)) {
      allocated(i) := false.B
    }
  }

  /**
    * update pointers
    */
  val lastCycleCancelCount = PopCount(RegNext(needCancel))
  // when io.brqRedirect.valid, we don't allow enqueue even though it may fire.
  val enqNumber = Mux(io.enq.canAccept && io.enq.sqCanAccept && !io.brqRedirect.valid, PopCount(io.enq.req.map(_.valid)), 0.U)
  when (lastCycleRedirect.valid) {
    // we recover the pointers in the next cycle after redirect
    enqPtrExt := VecInit(enqPtrExt.map(_ - lastCycleCancelCount))
  }.otherwise {
    enqPtrExt := VecInit(enqPtrExt.map(_ + enqNumber))
  }

  val commitCount = PopCount(loadCommit)
  deqPtrExt := deqPtrExt + commitCount

  val lastLastCycleRedirect = RegNext(lastCycleRedirect.valid)
  val trueValidCounter = distanceBetween(enqPtrExt(0), deqPtrExt)
  validCounter := Mux(lastLastCycleRedirect,
    trueValidCounter,
    validCounter + enqNumber - commitCount
  )

  allowEnqueue := Mux(io.brqRedirect.valid,
    false.B,
    Mux(lastLastCycleRedirect,
      trueValidCounter <= (LoadQueueSize - RenameWidth).U,
      validCounter + enqNumber <= (LoadQueueSize - RenameWidth).U
    )
  )
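  // allowEnqueue is a registered signal derived from last cycle's counters
  // and conservatively reserves RenameWidth entries, presumably to keep the
  // dispatch-ready signal off the critical path at the cost of slightly
  // pessimistic occupancy.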

  // debug info
  XSDebug("enqPtrExt %d:%d deqPtrExt %d:%d\n", enqPtrExt(0).flag, enqPtr, deqPtrExt.flag, deqPtr)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when(flag) {
      XSDebug(false, true.B, name)
    }.otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  for (i <- 0 until LoadQueueSize) {
    if (i % 4 == 0) XSDebug("")
    XSDebug(false, true.B, "%x [%x] ", uop(i).cf.pc, dataModule.io.rdata(i).paddr)
    PrintFlag(allocated(i), "a")
    PrintFlag(allocated(i) && datavalid(i), "v")
    PrintFlag(allocated(i) && writebacked(i), "w")
    PrintFlag(allocated(i) && commited(i), "c")
    PrintFlag(allocated(i) && miss(i), "m")
    PrintFlag(allocated(i) && listening(i), "l")
    PrintFlag(allocated(i) && pending(i), "p")
    XSDebug(false, true.B, " ")
    if (i % 4 == 3 || i == LoadQueueSize - 1) XSDebug(false, true.B, "\n")
  }

}