xref: /XiangShan/src/main/scala/xiangshan/mem/lsqueue/LoadQueue.scala (revision 1d32896e2cd7aec72f0330f4f790abd81ff24104)
package xiangshan.mem

import chisel3._
import chisel3.util._
import freechips.rocketchip.tile.HasFPUParameters
import utils._
import xiangshan._
import xiangshan.cache._
import xiangshan.cache.{DCacheLineIO, DCacheWordIO, MemoryOpConstants, TlbRequestIO}
import xiangshan.backend.LSUOpType
import xiangshan.mem._
import xiangshan.backend.roq.RoqPtr
import xiangshan.backend.fu.HasExceptionNO


class LqPtr extends CircularQueuePtr(LqPtr.LoadQueueSize) { }

object LqPtr extends HasXSParameter {
  def apply(f: Bool, v: UInt): LqPtr = {
    val ptr = Wire(new LqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}
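
// Note on the pointer encoding (illustrative values, assuming the default
// LoadQueueSize = 64): CircularQueuePtr extends the raw index with a wrap
// flag so that full and empty can be told apart when the indices match:
//   enq = (flag 0, value 5), deq = (flag 0, value 5) -> queue is empty
//   enq = (flag 1, value 5), deq = (flag 0, value 5) -> queue is full
// The isEmpty/isFull logic in LoadQueue below relies on this comparison.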

trait HasLoadHelper { this: XSModule =>
  def rdataHelper(uop: MicroOp, rdata: UInt): UInt = {
    val fpWen = uop.ctrl.fpWen
    LookupTree(uop.ctrl.fuOpType, List(
      LSUOpType.lb   -> SignExt(rdata(7, 0) , XLEN),
      LSUOpType.lh   -> SignExt(rdata(15, 0), XLEN),
      LSUOpType.lw   -> Mux(fpWen, rdata, SignExt(rdata(31, 0), XLEN)),
      LSUOpType.ld   -> Mux(fpWen, rdata, SignExt(rdata(63, 0), XLEN)),
      LSUOpType.lbu  -> ZeroExt(rdata(7, 0) , XLEN),
      LSUOpType.lhu  -> ZeroExt(rdata(15, 0), XLEN),
      LSUOpType.lwu  -> ZeroExt(rdata(31, 0), XLEN)
    ))
  }

  def fpRdataHelper(uop: MicroOp, rdata: UInt): UInt = {
    LookupTree(uop.ctrl.fuOpType, List(
      LSUOpType.lw   -> recode(rdata(31, 0), S),
      LSUOpType.ld   -> recode(rdata(63, 0), D)
    ))
  }
}
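
// Behavioural example (sketch, not from the original file): an lb whose
// loaded byte is 0x80 is sign-extended by rdataHelper to
// 0xffff_ffff_ffff_ff80, while an lbu zero-extends it to 0x80. For lw/ld
// with fpWen set, rdataHelper passes the raw bits through; fpRdataHelper
// instead recodes them into the hardfloat internal format.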

class LqEnqIO extends XSBundle {
  val canAccept = Output(Bool())
  val sqCanAccept = Input(Bool())
  val needAlloc = Vec(RenameWidth, Input(Bool()))
  val req = Vec(RenameWidth, Flipped(ValidIO(new MicroOp)))
  val resp = Vec(RenameWidth, Output(new LqPtr))
}

// Load Queue
class LoadQueue extends XSModule
  with HasDCacheParameters
  with HasCircularQueuePtrHelper
  with HasLoadHelper
  with HasExceptionNO
{
  val io = IO(new Bundle() {
    val enq = new LqEnqIO
    val brqRedirect = Input(Valid(new Redirect))
    val loadIn = Vec(LoadPipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val ldout = Vec(2, DecoupledIO(new ExuOutput)) // writeback int load
    val load_s1 = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO))
    val commits = Flipped(new RoqCommitIO)
    val rollback = Output(Valid(new Redirect)) // replay now starts from load instead of store
    val dcache = Flipped(ValidIO(new Refill))
    val uncache = new DCacheWordIO
    val roqDeqPtr = Input(new RoqPtr)
    val exceptionAddr = new ExceptionAddrIO
  })

  val uop = Reg(Vec(LoadQueueSize, new MicroOp))
  // val data = Reg(Vec(LoadQueueSize, new LsRoqEntry))
  val dataModule = Module(new LoadQueueData(LoadQueueSize, wbNumRead = LoadPipelineWidth, wbNumWrite = LoadPipelineWidth))
  dataModule.io := DontCare
  val vaddrModule = Module(new AsyncDataModuleTemplate(UInt(VAddrBits.W), LoadQueueSize, numRead = 1, numWrite = LoadPipelineWidth))
  vaddrModule.io := DontCare
  val allocated = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // lq entry has been allocated
  val datavalid = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // data is valid
  val writebacked = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // inst has been written back to CDB
  val commited = Reg(Vec(LoadQueueSize, Bool())) // inst has been committed by roq
  val miss = Reg(Vec(LoadQueueSize, Bool())) // load inst missed, waiting for miss queue to accept miss request
  // val listening = Reg(Vec(LoadQueueSize, Bool())) // waiting for refill result
  val pending = Reg(Vec(LoadQueueSize, Bool())) // mmio pending: inst is an mmio inst, it will not be executed until it reaches the end of roq

  val debug_mmio = Reg(Vec(LoadQueueSize, Bool())) // mmio: inst is an mmio inst

  val enqPtrExt = RegInit(VecInit((0 until RenameWidth).map(_.U.asTypeOf(new LqPtr))))
  val deqPtrExt = RegInit(0.U.asTypeOf(new LqPtr))
  val deqPtrExtNext = Wire(new LqPtr)
  val validCounter = RegInit(0.U(log2Ceil(LoadQueueSize + 1).W))
  val allowEnqueue = RegInit(true.B)

  val enqPtr = enqPtrExt(0).value
  val deqPtr = deqPtrExt.value
  val sameFlag = enqPtrExt(0).flag === deqPtrExt.flag
  val isEmpty = enqPtr === deqPtr && sameFlag
  val isFull = enqPtr === deqPtr && !sameFlag
  val allowIn = !isFull

  val loadCommit = (0 until CommitWidth).map(i => io.commits.valid(i) && !io.commits.isWalk && io.commits.info(i).commitType === CommitType.LOAD)
  val mcommitIdx = (0 until CommitWidth).map(i => io.commits.info(i).lqIdx.value)

  val deqMask = UIntToMask(deqPtr, LoadQueueSize)
  val enqMask = UIntToMask(enqPtr, LoadQueueSize)

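  // UIntToMask(ptr, size) (from utils) is expected to set the bits below
  // ptr, e.g. UIntToMask(3.U, 8) = 0b0000_0111. deqMask/enqMask are used
  // below to prioritise the oldest entries relative to the queue pointers.
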
  /**
    * Enqueue at dispatch
    *
    * Currently, LoadQueue only allows enqueue when the number of empty
    * entries is greater than RenameWidth (the enqueue width).
    */
  io.enq.canAccept := allowEnqueue

  for (i <- 0 until RenameWidth) {
    val offset = if (i == 0) 0.U else PopCount(io.enq.needAlloc.take(i))
    val lqIdx = enqPtrExt(offset)
    val index = lqIdx.value
    when (io.enq.req(i).valid && io.enq.canAccept && io.enq.sqCanAccept && !io.brqRedirect.valid) {
      uop(index) := io.enq.req(i).bits
      allocated(index) := true.B
      datavalid(index) := false.B
      writebacked(index) := false.B
      commited(index) := false.B
      miss(index) := false.B
      // listening(index) := false.B
      pending(index) := false.B
    }
    io.enq.resp(i) := lqIdx
  }
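  // Example: with needAlloc = (1, 0, 1, 1), the computed offsets are
  // 0, 1, 1, 2, so each allocating slot claims the next free entry from
  // enqPtrExt(0) onwards, skipping rename slots that allocate nothing.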
  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n")

  /**
    * Writeback load from load units
    *
    * Most load instructions write back to the regfile in the same cycle.
    * However,
    *   (1) For an mmio instruction with exceptions, it writes back to ROB immediately.
    *   (2) For an mmio instruction without exceptions, it does not write back.
    * The mmio instruction will be sent to the lower memory level when it reaches ROB's head.
    * After the uncache response, it writes back through the arbiter shared with loadUnit.
    *   (3) For cache misses, it is marked as miss and sent to dcache later.
    * After the cache refill, it writes back through the arbiter shared with loadUnit.
    */
  for (i <- 0 until LoadPipelineWidth) {
    dataModule.io.wb.wen(i) := false.B
    vaddrModule.io.wen(i) := false.B
    when(io.loadIn(i).fire()) {
      when(io.loadIn(i).bits.miss) {
        XSInfo(io.loadIn(i).valid, "load miss write to lq idx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio
        )
      }.otherwise {
        XSInfo(io.loadIn(i).valid, "load hit write to cdb lqidx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio
        )
      }
      val loadWbIndex = io.loadIn(i).bits.uop.lqIdx.value
      datavalid(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      writebacked(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio

      val loadWbData = Wire(new LQDataEntry)
      loadWbData.paddr := io.loadIn(i).bits.paddr
      loadWbData.mask := io.loadIn(i).bits.mask
      loadWbData.data := io.loadIn(i).bits.data // fwd data
      loadWbData.fwdMask := io.loadIn(i).bits.forwardMask
      dataModule.io.wbWrite(i, loadWbIndex, loadWbData)
      dataModule.io.wb.wen(i) := true.B

      vaddrModule.io.waddr(i) := loadWbIndex
      vaddrModule.io.wdata(i) := io.loadIn(i).bits.vaddr
      vaddrModule.io.wen(i) := true.B

      debug_mmio(loadWbIndex) := io.loadIn(i).bits.mmio

      val dcacheMissed = io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      miss(loadWbIndex) := dcacheMissed
      pending(loadWbIndex) := io.loadIn(i).bits.mmio
      uop(loadWbIndex).debugInfo.issueTime := io.loadIn(i).bits.uop.debugInfo.issueTime
    }
  }
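
  // Resulting per-entry state for a load arriving from the pipeline
  // (normal, non-exception cases):
  //   cache hit : datavalid = 1, writebacked = 1, miss = 0, pending = 0
  //   cache miss: datavalid = 0, writebacked = 0, miss = 1, pending = 0
  //   mmio      : datavalid = 0, writebacked = 0, miss = 0, pending = 1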

  /**
    * Cache miss request
    *
    * (1) writeback: miss
    * (2) send to dcache: listening
    * (3) dcache response: datavalid
    * (4) writeback to ROB: writeback
    */
  // val inflightReqs = RegInit(VecInit(Seq.fill(cfg.nLoadMissEntries)(0.U.asTypeOf(new InflightBlockInfo))))
  // val inflightReqFull = inflightReqs.map(req => req.valid).reduce(_&&_)
  // val reqBlockIndex = PriorityEncoder(~VecInit(inflightReqs.map(req => req.valid)).asUInt)

  // val missRefillSelVec = VecInit(
  //   (0 until LoadQueueSize).map{ i =>
  //     val inflight = inflightReqs.map(req => req.valid && req.block_addr === get_block_addr(dataModule.io.rdata(i).paddr)).reduce(_||_)
  //     allocated(i) && miss(i) && !inflight
  //   })

  // val missRefillSel = getFirstOne(missRefillSelVec, deqMask)
  // val missRefillBlockAddr = get_block_addr(dataModule.io.rdata(missRefillSel).paddr)
  // io.dcache.req.valid := missRefillSelVec.asUInt.orR
  // io.dcache.req.bits.cmd := MemoryOpConstants.M_XRD
  // io.dcache.req.bits.addr := missRefillBlockAddr
  // io.dcache.req.bits.data := DontCare
  // io.dcache.req.bits.mask := DontCare

  // io.dcache.req.bits.meta.id       := DontCare
  // io.dcache.req.bits.meta.vaddr    := DontCare // dataModule.io.rdata(missRefillSel).vaddr
  // io.dcache.req.bits.meta.paddr    := missRefillBlockAddr
  // io.dcache.req.bits.meta.uop      := uop(missRefillSel)
  // io.dcache.req.bits.meta.mmio     := false.B // dataModule.io.rdata(missRefillSel).mmio
  // io.dcache.req.bits.meta.tlb_miss := false.B
  // io.dcache.req.bits.meta.mask     := DontCare
  // io.dcache.req.bits.meta.replay   := false.B

  // assert(!(dataModule.io.rdata(missRefillSel).mmio && io.dcache.req.valid))

  // when(io.dcache.req.fire()) {
  //   miss(missRefillSel) := false.B
  //   // listening(missRefillSel) := true.B

  //   // mark this block as inflight
  //   inflightReqs(reqBlockIndex).valid := true.B
  //   inflightReqs(reqBlockIndex).block_addr := missRefillBlockAddr
  //   assert(!inflightReqs(reqBlockIndex).valid)
  // }

  // when(io.dcache.resp.fire()) {
  //   val inflight = inflightReqs.map(req => req.valid && req.block_addr === get_block_addr(io.dcache.resp.bits.meta.paddr)).reduce(_||_)
  //   assert(inflight)
  //   for (i <- 0 until cfg.nLoadMissEntries) {
  //     when (inflightReqs(i).valid && inflightReqs(i).block_addr === get_block_addr(io.dcache.resp.bits.meta.paddr)) {
  //       inflightReqs(i).valid := false.B
  //     }
  //   }
  // }

  // when(io.dcache.req.fire()){
  //   XSDebug("miss req: pc:0x%x roqIdx:%d lqIdx:%d (p)addr:0x%x vaddr:0x%x\n",
  //     io.dcache.req.bits.meta.uop.cf.pc, io.dcache.req.bits.meta.uop.roqIdx.asUInt, io.dcache.req.bits.meta.uop.lqIdx.asUInt,
  //     io.dcache.req.bits.addr, io.dcache.req.bits.meta.vaddr
  //   )
  // }

  when(io.dcache.valid) {
    XSDebug("miss resp: paddr:0x%x data %x\n", io.dcache.bits.addr, io.dcache.bits.data)
  }

  // Refill 64 bits in a cycle
  // Refill data comes back from io.dcache
  dataModule.io.refill.valid := io.dcache.valid
  dataModule.io.refill.paddr := io.dcache.bits.addr
  dataModule.io.refill.data := io.dcache.bits.data

  (0 until LoadQueueSize).map(i => {
    dataModule.io.refill.refillMask(i) := allocated(i) && miss(i)
    when(dataModule.io.refill.valid && dataModule.io.refill.refillMask(i) && dataModule.io.refill.matchMask(i)) {
      datavalid(i) := true.B
      miss(i) := false.B
    }
  })
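
  // matchMask presumably flags the entries whose recorded block address
  // matches the refill address (computed inside LoadQueueData), while
  // refillMask restricts the update to allocated, still-missing entries.
  // A matching entry becomes datavalid and leaves the miss state in the
  // same cycle the refill arrives.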

  // Writeback up to 2 missed load insts to CDB
  //
  // Pick 2 missed loads (data refilled) and write them back to the CDB.
  // The 2 refilled loads are selected from even and odd entries separately.

  // Stage 0
  // Generate writeback indexes

  def getEvenBits(input: UInt): UInt = {
    require(input.getWidth == LoadQueueSize)
    VecInit((0 until LoadQueueSize/2).map(i => {input(2*i)})).asUInt
  }
  def getOddBits(input: UInt): UInt = {
    require(input.getWidth == LoadQueueSize)
    VecInit((0 until LoadQueueSize/2).map(i => {input(2*i+1)})).asUInt
  }
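
  // Example: for an 8-bit input 0b10110100,
  //   getEvenBits -> 0b0110 (input bits 0, 2, 4, 6)
  //   getOddBits  -> 0b1100 (input bits 1, 3, 5, 7)
  // Splitting the queue this way lets the two writeback ports select
  // independently without a full 2-of-N arbiter.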

  val loadWbSel = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W))) // index selected last cycle
  val loadWbSelV = RegInit(VecInit(List.fill(LoadPipelineWidth)(false.B))) // index selected in last cycle is valid

  val loadWbSelVec = VecInit((0 until LoadQueueSize).map(i => {
    allocated(i) && !writebacked(i) && datavalid(i)
  })).asUInt() // use UInt instead of Vec to reduce Verilog lines
  val evenDeqMask = getEvenBits(deqMask)
  val oddDeqMask = getOddBits(deqMask)
  // generate lastCycleSelect mask
  val evenSelectMask = Mux(io.ldout(0).fire(), getEvenBits(UIntToOH(loadWbSel(0))), 0.U)
  val oddSelectMask = Mux(io.ldout(1).fire(), getOddBits(UIntToOH(loadWbSel(1))), 0.U)
  // generate real select vec
  val loadEvenSelVec = getEvenBits(loadWbSelVec) & ~evenSelectMask
  val loadOddSelVec = getOddBits(loadWbSelVec) & ~oddSelectMask

  def toVec(a: UInt): Vec[Bool] = {
    VecInit(a.asBools)
  }

  val loadWbSelGen = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W)))
  val loadWbSelVGen = Wire(Vec(LoadPipelineWidth, Bool()))
  loadWbSelGen(0) := Cat(getFirstOne(toVec(loadEvenSelVec), evenDeqMask), 0.U(1.W))
  loadWbSelVGen(0) := loadEvenSelVec.asUInt.orR
  loadWbSelGen(1) := Cat(getFirstOne(toVec(loadOddSelVec), oddDeqMask), 1.U(1.W))
  loadWbSelVGen(1) := loadOddSelVec.asUInt.orR

  (0 until LoadPipelineWidth).map(i => {
    val canGo = io.ldout(i).fire() || !loadWbSelV(i)
    val valid = loadWbSelVGen(i)
    loadWbSel(i) := RegEnable(loadWbSelGen(i), valid && canGo)
    when(io.ldout(i).fire()){
      // Mark the entry as writebacked, so it will not be selected again next cycle
      writebacked(loadWbSel(i)) := true.B
      // update loadWbSelV
      loadWbSelV(i) := false.B
    }
    when(valid && canGo){
      loadWbSelV(i) := true.B
    }
  })
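
  // Stage 0 thus acts as a registered two-slot arbiter: loadWbSelGen picks
  // candidates combinationally, loadWbSel/loadWbSelV hold the pick until
  // io.ldout fires, and the (even|odd)SelectMask terms keep the entry just
  // written back from being re-selected before writebacked(i) updates.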

  // Stage 1
  // Use indexes generated in cycle 0 to read data
  // writeback data to cdb
  (0 until LoadPipelineWidth).map(i => {
    // data select
    dataModule.io.wb.raddr(i) := loadWbSelGen(i)
    val rdata = dataModule.io.wb.rdata(i).data
    val seluop = uop(loadWbSel(i))
    val func = seluop.ctrl.fuOpType
    val raddr = dataModule.io.wb.rdata(i).paddr
    val rdataSel = LookupTree(raddr(2, 0), List(
      "b000".U -> rdata(63, 0),
      "b001".U -> rdata(63, 8),
      "b010".U -> rdata(63, 16),
      "b011".U -> rdata(63, 24),
      "b100".U -> rdata(63, 32),
      "b101".U -> rdata(63, 40),
      "b110".U -> rdata(63, 48),
      "b111".U -> rdata(63, 56)
    ))
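    // rdataSel shifts the 64-bit entry data right by the byte offset in
    // paddr(2, 0): e.g. a load at offset 3 sees rdata(63, 24), and
    // rdataHelper below truncates/extends the result per the opcode.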
    val rdataPartialLoad = rdataHelper(seluop, rdataSel)

    // writeback missed int/fp load
    //
    // Int load writeback will finish (if not blocked) in one cycle
    io.ldout(i).bits.uop := seluop
    io.ldout(i).bits.uop.lqIdx := loadWbSel(i).asTypeOf(new LqPtr)
    io.ldout(i).bits.data := rdataPartialLoad
    io.ldout(i).bits.redirectValid := false.B
    io.ldout(i).bits.redirect := DontCare
    io.ldout(i).bits.brUpdate := DontCare
    io.ldout(i).bits.debug.isMMIO := debug_mmio(loadWbSel(i))
    io.ldout(i).bits.debug.isPerfCnt := false.B
    io.ldout(i).bits.fflags := DontCare
    io.ldout(i).valid := loadWbSelV(i)

    when(io.ldout(i).fire()) {
      XSInfo("int load miss write to cdb roqidx %d lqidx %d pc 0x%x mmio %x\n",
        io.ldout(i).bits.uop.roqIdx.asUInt,
        io.ldout(i).bits.uop.lqIdx.asUInt,
        io.ldout(i).bits.uop.cf.pc,
        debug_mmio(loadWbSel(i))
      )
    }
  })

  /**
    * Load commits
    *
    * When a load is committed, mark it as !allocated and move deqPtrExt forward.
    */
  (0 until CommitWidth).map(i => {
    when(loadCommit(i)) {
      allocated(mcommitIdx(i)) := false.B
      XSDebug("load commit %d: idx %d %x\n", i.U, mcommitIdx(i), uop(mcommitIdx(i)).cf.pc)
    }
  })

  def getFirstOne(mask: Vec[Bool], startMask: UInt) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    PriorityEncoder(Mux(highBitsUint.orR(), highBitsUint, mask.asUInt))
  }
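  // Example: mask = 0b0101, startMask = 0b0011 (entries below the search
  // origin). highBits = 0b0100, so entry 2 is picked first; when no
  // candidate survives above the origin, the selection wraps around and
  // PriorityEncoder falls back to the full mask.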

  def getOldestInTwo(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    assert(valid.length == 2)
    Mux(valid(0) && valid(1),
      Mux(isAfter(uop(0).roqIdx, uop(1).roqIdx), uop(1), uop(0)),
      Mux(valid(0) && !valid(1), uop(0), uop(1)))
  }

  def getAfterMask(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    val length = valid.length
    (0 until length).map(i => {
      (0 until length).map(j => {
        Mux(valid(i) && valid(j),
          isAfter(uop(i).roqIdx, uop(j).roqIdx),
          Mux(!valid(i), true.B, false.B))
      })
    })
  }
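  // getAfterMask(i)(j) is true when candidate i is program-order younger
  // than candidate j; an invalid candidate is treated as younger, so it
  // always loses the oldest-selection performed in detectRollback below.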

  /**
    * Memory violation detection
    *
    * When a store writes back, it searches the LoadQueue for younger load instructions
    * with the same load physical address. They have loaded wrong data and need re-execution.
    *
    * Cycle 0: Store Writeback
    *   Generate the match vector for the store address with rangeMask(stPtr, enqPtr).
    *   Besides, load instructions in LoadUnit_S1 and S2 are also checked.
    * Cycle 1: Redirect Generation
    *   There are three possible types of violations. Choose the oldest load.
    *   Set io.rollback according to the detected violation.
    */
  io.load_s1 := DontCare
  def detectRollback(i: Int) = {
    val startIndex = io.storeIn(i).bits.uop.lqIdx.value
    val lqIdxMask = UIntToMask(startIndex, LoadQueueSize)
    val xorMask = lqIdxMask ^ enqMask
    val sameFlag = io.storeIn(i).bits.uop.lqIdx.flag === enqPtrExt(0).flag
    val toEnqPtrMask = Mux(sameFlag, xorMask, ~xorMask)
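    // toEnqPtrMask marks the entries in [store's lqIdx, enqPtr): XOR-ing
    // the two below-pointer masks selects the index range between them,
    // and the flag comparison corrects the selection for wrap-around.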

    // check if a load already in the lq needs to be rolled back
    dataModule.io.violation(i).paddr := io.storeIn(i).bits.paddr
    dataModule.io.violation(i).mask := io.storeIn(i).bits.mask
    val addrMaskMatch = RegNext(dataModule.io.violation(i).violationMask)
    val entryNeedCheck = RegNext(VecInit((0 until LoadQueueSize).map(j => {
      allocated(j) && toEnqPtrMask(j) && (datavalid(j) || miss(j))
    })))
    val lqViolationVec = VecInit((0 until LoadQueueSize).map(j => {
      addrMaskMatch(j) && entryNeedCheck(j)
    }))
    val lqViolation = lqViolationVec.asUInt().orR()
    val lqViolationIndex = getFirstOne(lqViolationVec, RegNext(lqIdxMask))
    val lqViolationUop = uop(lqViolationIndex)
    // lqViolationUop.lqIdx.flag := deqMask(lqViolationIndex) ^ deqPtrExt.flag
    // lqViolationUop.lqIdx.value := lqViolationIndex
    XSDebug(lqViolation, p"${Binary(Cat(lqViolationVec))}, $startIndex, $lqViolationIndex\n")

    // when l/s write back to the roq together, check if rollback is needed
    val wbViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.loadIn(j).valid &&
        isAfter(io.loadIn(j).bits.uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.loadIn(j).bits.paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.loadIn(j).bits.mask).orR
    })))
    val wbViolation = wbViolationVec.asUInt().orR()
    val wbViolationUop = getOldestInTwo(wbViolationVec, RegNext(VecInit(io.loadIn.map(_.bits.uop))))
    XSDebug(wbViolation, p"${Binary(Cat(wbViolationVec))}, $wbViolationUop\n")

    // check if rollback is needed for a load still in load unit stage 1
    val l1ViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.load_s1(j).valid && // L1 valid
        isAfter(io.load_s1(j).uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.load_s1(j).paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.load_s1(j).mask).orR
    })))
    val l1Violation = l1ViolationVec.asUInt().orR()
    val l1ViolationUop = getOldestInTwo(l1ViolationVec, RegNext(VecInit(io.load_s1.map(_.uop))))
    XSDebug(l1Violation, p"${Binary(Cat(l1ViolationVec))}, $l1ViolationUop\n")

    val rollbackValidVec = Seq(lqViolation, wbViolation, l1Violation)
    val rollbackUopVec = Seq(lqViolationUop, wbViolationUop, l1ViolationUop)

    val mask = getAfterMask(rollbackValidVec, rollbackUopVec)
    val oneAfterZero = mask(1)(0)
    val rollbackUop = Mux(oneAfterZero && mask(2)(0),
      rollbackUopVec(0),
      Mux(!oneAfterZero && mask(2)(1), rollbackUopVec(1), rollbackUopVec(2)))

    XSDebug(
      l1Violation,
      "need rollback (l1 load) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, l1ViolationUop.roqIdx.asUInt
    )
    XSDebug(
      lqViolation,
      "need rollback (ld wb before store) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, lqViolationUop.roqIdx.asUInt
    )
    XSDebug(
      wbViolation,
      "need rollback (ld/st wb together) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, wbViolationUop.roqIdx.asUInt
    )

    (RegNext(io.storeIn(i).valid) && Cat(rollbackValidVec).orR, rollbackUop)
  }

  // rollback check
  val rollback = Wire(Vec(StorePipelineWidth, Valid(new MicroOp)))
  for (i <- 0 until StorePipelineWidth) {
    val detectedRollback = detectRollback(i)
    rollback(i).valid := detectedRollback._1
    rollback(i).bits := detectedRollback._2
  }

  def rollbackSel(a: Valid[MicroOp], b: Valid[MicroOp]): ValidIO[MicroOp] = {
    Mux(
      a.valid,
      Mux(
        b.valid,
        Mux(isAfter(a.bits.roqIdx, b.bits.roqIdx), b, a), // a, b both valid, sel oldest
        a // sel a
      ),
      b // sel b
    )
  }
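
  // ParallelOperation folds rollbackSel over the per-store-pipeline
  // requests as a reduction tree, so rollbackSelected ends up being the
  // oldest valid rollback request of this cycle.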

  val rollbackSelected = ParallelOperation(rollback, rollbackSel)
  val lastCycleRedirect = RegNext(io.brqRedirect)

  // Note that we use roqIdx - 1.U to flush the load instruction itself.
  // Thus, even if last cycle's roqIdx equals this cycle's roqIdx, it still triggers the redirect.
  io.rollback.valid := rollbackSelected.valid &&
    (!lastCycleRedirect.valid || !isAfter(rollbackSelected.bits.roqIdx, lastCycleRedirect.bits.roqIdx)) &&
    !(lastCycleRedirect.valid && lastCycleRedirect.bits.isUnconditional())

  io.rollback.bits.roqIdx := rollbackSelected.bits.roqIdx
  io.rollback.bits.level := RedirectLevel.flush
  io.rollback.bits.interrupt := DontCare
  io.rollback.bits.pc := DontCare
  io.rollback.bits.target := rollbackSelected.bits.cf.pc
  io.rollback.bits.brTag := rollbackSelected.bits.brTag

  when(io.rollback.valid) {
    XSDebug("Mem rollback: pc %x roqidx %d\n", io.rollback.bits.pc, io.rollback.bits.roqIdx.asUInt)
  }

  /**
    * Memory mapped IO / other uncached operations
    */
  io.uncache.req.valid := pending(deqPtr) && allocated(deqPtr) &&
    io.commits.info(0).commitType === CommitType.LOAD &&
    io.roqDeqPtr === uop(deqPtr).roqIdx &&
    !io.commits.isWalk

  dataModule.io.uncache.raddr := deqPtrExtNext.value

  io.uncache.req.bits.cmd  := MemoryOpConstants.M_XRD
  io.uncache.req.bits.addr := dataModule.io.uncache.rdata.paddr
  io.uncache.req.bits.data := dataModule.io.uncache.rdata.data
  io.uncache.req.bits.mask := dataModule.io.uncache.rdata.mask

  io.uncache.req.bits.meta.id       := DontCare
  io.uncache.req.bits.meta.vaddr    := DontCare
  io.uncache.req.bits.meta.paddr    := dataModule.io.uncache.rdata.paddr
  io.uncache.req.bits.meta.uop      := uop(deqPtr)
  io.uncache.req.bits.meta.mmio     := true.B
  io.uncache.req.bits.meta.tlb_miss := false.B
  io.uncache.req.bits.meta.mask     := dataModule.io.uncache.rdata.mask
  io.uncache.req.bits.meta.replay   := false.B

  io.uncache.resp.ready := true.B

  when (io.uncache.req.fire()) {
    pending(deqPtr) := false.B

    XSDebug("uncache req: pc %x addr %x data %x op %x mask %x\n",
      uop(deqPtr).cf.pc,
      io.uncache.req.bits.addr,
      io.uncache.req.bits.data,
      io.uncache.req.bits.cmd,
      io.uncache.req.bits.mask
    )
  }

  dataModule.io.uncache.wen := false.B
  when(io.uncache.resp.fire()){
    datavalid(deqPtr) := true.B
    dataModule.io.uncacheWrite(deqPtr, io.uncache.resp.bits.data(XLEN - 1, 0))
    dataModule.io.uncache.wen := true.B

    XSDebug("uncache resp: data %x\n", io.uncache.resp.bits.data)
  }
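
  // After the uncache response, the mmio load behaves like a refilled
  // load: datavalid(deqPtr) is set, so the normal Stage 0/1 writeback
  // path above delivers the data to the CDB via io.ldout.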

  // Read vaddr for mem exception
  vaddrModule.io.raddr(0) := io.exceptionAddr.lsIdx.lqIdx.value
  io.exceptionAddr.vaddr := vaddrModule.io.rdata(0)

  // misprediction recovery / exception redirect
  // invalidate lq entries using roqIdx
  val needCancel = Wire(Vec(LoadQueueSize, Bool()))
  for (i <- 0 until LoadQueueSize) {
    needCancel(i) := uop(i).roqIdx.needFlush(io.brqRedirect) && allocated(i) && !commited(i)
    when (needCancel(i)) {
      allocated(i) := false.B
    }
  }

  /**
    * update pointers
    */
  val lastCycleCancelCount = PopCount(RegNext(needCancel))
  // when io.brqRedirect.valid, we don't allow enqueue even though it may fire.
  val enqNumber = Mux(io.enq.canAccept && io.enq.sqCanAccept && !io.brqRedirect.valid, PopCount(io.enq.req.map(_.valid)), 0.U)
  when (lastCycleRedirect.valid) {
    // we recover the pointers in the next cycle after redirect
    enqPtrExt := VecInit(enqPtrExt.map(_ - lastCycleCancelCount))
  }.otherwise {
    enqPtrExt := VecInit(enqPtrExt.map(_ + enqNumber))
  }

  val commitCount = PopCount(loadCommit)
  deqPtrExtNext := deqPtrExt + commitCount
  deqPtrExt := deqPtrExtNext

  val lastLastCycleRedirect = RegNext(lastCycleRedirect.valid)
  val trueValidCounter = distanceBetween(enqPtrExt(0), deqPtrExt)
  validCounter := Mux(lastLastCycleRedirect,
    trueValidCounter,
    validCounter + enqNumber - commitCount
  )

  allowEnqueue := Mux(io.brqRedirect.valid,
    false.B,
    Mux(lastLastCycleRedirect,
      trueValidCounter <= (LoadQueueSize - RenameWidth).U,
      validCounter + enqNumber <= (LoadQueueSize - RenameWidth).U
    )
  )
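
  // Example (assuming LoadQueueSize = 64 and RenameWidth = 6): enqueue is
  // allowed only while at most 58 entries would be in use, so a full
  // rename bundle can always be accepted next cycle without overflowing.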

  // debug info
  XSDebug("enqPtrExt %d:%d deqPtrExt %d:%d\n", enqPtrExt(0).flag, enqPtr, deqPtrExt.flag, deqPtr)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when(flag) {
      XSDebug(false, true.B, name)
    }.otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  for (i <- 0 until LoadQueueSize) {
    if (i % 4 == 0) XSDebug("")
    XSDebug(false, true.B, "%x [%x] ", uop(i).cf.pc, dataModule.io.debug(i).paddr)
    PrintFlag(allocated(i), "a")
    PrintFlag(allocated(i) && datavalid(i), "v")
    PrintFlag(allocated(i) && writebacked(i), "w")
    PrintFlag(allocated(i) && commited(i), "c")
    PrintFlag(allocated(i) && miss(i), "m")
    // PrintFlag(allocated(i) && listening(i), "l")
    PrintFlag(allocated(i) && pending(i), "p")
    XSDebug(false, true.B, " ")
    if (i % 4 == 3 || i == LoadQueueSize - 1) XSDebug(false, true.B, "\n")
  }

}