// xref: /XiangShan/src/main/scala/xiangshan/mem/lsqueue/LoadQueue.scala (revision 2d7c7105479bec3c329cf213502bd6a01cff7c0a)
package xiangshan.mem

import chisel3._
import chisel3.util._
import freechips.rocketchip.tile.HasFPUParameters
import utils._
import xiangshan._
import xiangshan.cache._
import xiangshan.cache.{DCacheLineIO, DCacheWordIO, MemoryOpConstants, TlbRequestIO}
import xiangshan.backend.LSUOpType
import xiangshan.mem._
import xiangshan.backend.roq.RoqPtr
import xiangshan.backend.fu.HasExceptionNO


class LqPtr extends CircularQueuePtr(LqPtr.LoadQueueSize) { }

object LqPtr extends HasXSParameter {
  def apply(f: Bool, v: UInt): LqPtr = {
    val ptr = Wire(new LqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}

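// A brief sketch (illustration only, not part of the hardware) of the circular-pointer
// convention inherited from CircularQueuePtr: each LqPtr carries a `value` (the entry
// index) and a `flag` that toggles every time the pointer wraps around LoadQueueSize.
// Assuming LoadQueueSize = 64:
//   enqPtr = (flag = 0, value = 10), deqPtr = (flag = 0, value = 10) -> queue empty
//   enqPtr = (flag = 1, value = 10), deqPtr = (flag = 0, value = 10) -> queue full
// This is exactly the sameFlag / isEmpty / isFull logic computed inside LoadQueue below.
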
trait HasLoadHelper { this: XSModule =>
  def rdataHelper(uop: MicroOp, rdata: UInt): UInt = {
    val fpWen = uop.ctrl.fpWen
    LookupTree(uop.ctrl.fuOpType, List(
      LSUOpType.lb   -> SignExt(rdata(7, 0) , XLEN),
      LSUOpType.lh   -> SignExt(rdata(15, 0), XLEN),
      LSUOpType.lw   -> Mux(fpWen, rdata, SignExt(rdata(31, 0), XLEN)),
      LSUOpType.ld   -> Mux(fpWen, rdata, SignExt(rdata(63, 0), XLEN)),
      LSUOpType.lbu  -> ZeroExt(rdata(7, 0) , XLEN),
      LSUOpType.lhu  -> ZeroExt(rdata(15, 0), XLEN),
      LSUOpType.lwu  -> ZeroExt(rdata(31, 0), XLEN),
    ))
  }

  def fpRdataHelper(uop: MicroOp, rdata: UInt): UInt = {
    LookupTree(uop.ctrl.fuOpType, List(
      LSUOpType.lw   -> recode(rdata(31, 0), S),
      LSUOpType.ld   -> recode(rdata(63, 0), D)
    ))
  }
}
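
// A worked example of the extension rules in rdataHelper (illustration only,
// assuming XLEN = 64 and an integer destination, i.e. fpWen = false):
//   lb  with data = 0x...80        -> SignExt(0x80, 64)        = 0xffff_ffff_ffff_ff80
//   lbu with data = 0x...80        -> ZeroExt(0x80, 64)        = 0x0000_0000_0000_0080
//   lw  with data = 0x...8000_0000 -> SignExt(0x8000_0000, 64) = 0xffff_ffff_8000_0000
// For fp loads (fpWen = true), lw/ld pass the raw bits through unchanged and
// fpRdataHelper recodes them into the internal hardfloat format instead.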

class LqEnqIO extends XSBundle {
  val canAccept = Output(Bool())
  val sqCanAccept = Input(Bool())
  val needAlloc = Vec(RenameWidth, Input(Bool()))
  val req = Vec(RenameWidth, Flipped(ValidIO(new MicroOp)))
  val resp = Vec(RenameWidth, Output(new LqPtr))
}

// Load Queue
class LoadQueue extends XSModule
  with HasDCacheParameters
  with HasCircularQueuePtrHelper
  with HasLoadHelper
  with HasExceptionNO
{
  val io = IO(new Bundle() {
    val enq = new LqEnqIO
    val brqRedirect = Flipped(ValidIO(new Redirect))
    val flush = Input(Bool())
    val loadIn = Vec(LoadPipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val ldout = Vec(2, DecoupledIO(new ExuOutput)) // writeback int load
    val load_s1 = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO))
    val commits = Flipped(new RoqCommitIO)
    val rollback = Output(Valid(new Redirect)) // replay now starts from load instead of store
    val dcache = Flipped(ValidIO(new Refill))
    val uncache = new DCacheWordIO
    val roqDeqPtr = Input(new RoqPtr)
    val exceptionAddr = new ExceptionAddrIO
  })

  val uop = Reg(Vec(LoadQueueSize, new MicroOp))
  // val data = Reg(Vec(LoadQueueSize, new LsRoqEntry))
  val dataModule = Module(new LoadQueueData(LoadQueueSize, wbNumRead = LoadPipelineWidth, wbNumWrite = LoadPipelineWidth))
  dataModule.io := DontCare
  val vaddrModule = Module(new AsyncDataModuleTemplate(UInt(VAddrBits.W), LoadQueueSize, numRead = 1, numWrite = LoadPipelineWidth))
  vaddrModule.io := DontCare
  val allocated = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // lq entry has been allocated
  val datavalid = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // data is valid
  val writebacked = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // inst has been written back to CDB
  val commited = Reg(Vec(LoadQueueSize, Bool())) // inst has been committed by roq
  val miss = Reg(Vec(LoadQueueSize, Bool())) // load inst missed, waiting for miss queue to accept miss request
  // val listening = Reg(Vec(LoadQueueSize, Bool())) // waiting for refill result
  val pending = Reg(Vec(LoadQueueSize, Bool())) // mmio pending: inst is an mmio inst, it will not be executed until it reaches the head of roq

  val debug_mmio = Reg(Vec(LoadQueueSize, Bool())) // mmio: inst is an mmio inst

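  // Life cycle of an lq entry (a summary of the logic below, for readability):
  //   dispatch  : allocated := true, all other flags cleared
  //   load exec : datavalid/writebacked set on a hit, miss set on a dcache miss,
  //               pending set for an mmio load
  //   refill    : a matching dcache refill sets datavalid and clears miss
  //   writeback : ldout fire sets writebacked
  //   commit    : allocated := false (the entry is reclaimed)
  //   redirect  : allocated := false for flushed, uncommitted entries
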
  val enqPtrExt = RegInit(VecInit((0 until RenameWidth).map(_.U.asTypeOf(new LqPtr))))
  val deqPtrExt = RegInit(0.U.asTypeOf(new LqPtr))
  val deqPtrExtNext = Wire(new LqPtr)
  val validCounter = RegInit(0.U(log2Ceil(LoadQueueSize + 1).W))
  val allowEnqueue = RegInit(true.B)

  val enqPtr = enqPtrExt(0).value
  val deqPtr = deqPtrExt.value
  val sameFlag = enqPtrExt(0).flag === deqPtrExt.flag
  val isEmpty = enqPtr === deqPtr && sameFlag
  val isFull = enqPtr === deqPtr && !sameFlag
  val allowIn = !isFull

  val loadCommit = (0 until CommitWidth).map(i => io.commits.valid(i) && !io.commits.isWalk && io.commits.info(i).commitType === CommitType.LOAD)
  val mcommitIdx = (0 until CommitWidth).map(i => io.commits.info(i).lqIdx.value)

  val deqMask = UIntToMask(deqPtr, LoadQueueSize)
  val enqMask = UIntToMask(enqPtr, LoadQueueSize)

  /**
    * Enqueue at dispatch
    *
    * Currently, LoadQueue only allows enqueue when #emptyEntries > RenameWidth (EnqWidth)
    */
  io.enq.canAccept := allowEnqueue

  for (i <- 0 until RenameWidth) {
    val offset = if (i == 0) 0.U else PopCount(io.enq.needAlloc.take(i))
    val lqIdx = enqPtrExt(offset)
    val index = lqIdx.value
    when (io.enq.req(i).valid && io.enq.canAccept && io.enq.sqCanAccept && !(io.brqRedirect.valid || io.flush)) {
      uop(index) := io.enq.req(i).bits
      allocated(index) := true.B
      datavalid(index) := false.B
      writebacked(index) := false.B
      commited(index) := false.B
      miss(index) := false.B
      // listening(index) := false.B
      pending(index) := false.B
    }
    io.enq.resp(i) := lqIdx
  }
  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n")
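
  // Example of the offset computation above (illustration only, assuming RenameWidth = 6):
  // with needAlloc = b000101 (only dispatch slots 0 and 2 carry loads), slot 0 gets
  // offset 0 and slot 2 gets offset PopCount(b01) = 1, so the two loads are allocated
  // at enqPtrExt(0) and enqPtrExt(1) respectively. Non-load slots still receive an
  // lqIdx through io.enq.resp but never set allocated.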

  /**
    * Writeback load from load units
    *
    * Most load instructions write back to the regfile at the same time.
    * However,
    *   (1) For an mmio instruction with exceptions, it writes back to ROB immediately.
    *   (2) For an mmio instruction without exceptions, it does not write back.
    *       The mmio instruction will be sent to lower level when it reaches ROB's head.
    *       After the uncache response arrives, it writes back through the arbiter shared with loadUnit.
    *   (3) For a cache-missed load, it is marked as miss and will be sent to dcache later.
    *       After the cache refills, it writes back through the arbiter shared with loadUnit.
    */
  for (i <- 0 until LoadPipelineWidth) {
    dataModule.io.wb.wen(i) := false.B
    vaddrModule.io.wen(i) := false.B
    when(io.loadIn(i).fire()) {
      when(io.loadIn(i).bits.miss) {
        XSInfo(io.loadIn(i).valid, "load miss write to lq idx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio
        )
      }.otherwise {
        XSInfo(io.loadIn(i).valid, "load hit write to cdb lqidx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio
        )
      }
      val loadWbIndex = io.loadIn(i).bits.uop.lqIdx.value
      datavalid(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      writebacked(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio

      val loadWbData = Wire(new LQDataEntry)
      loadWbData.paddr := io.loadIn(i).bits.paddr
      loadWbData.mask := io.loadIn(i).bits.mask
      loadWbData.data := io.loadIn(i).bits.data // fwd data
      loadWbData.fwdMask := io.loadIn(i).bits.forwardMask
      dataModule.io.wbWrite(i, loadWbIndex, loadWbData)
      dataModule.io.wb.wen(i) := true.B

      vaddrModule.io.waddr(i) := loadWbIndex
      vaddrModule.io.wdata(i) := io.loadIn(i).bits.vaddr
      vaddrModule.io.wen(i) := true.B

      debug_mmio(loadWbIndex) := io.loadIn(i).bits.mmio

      val dcacheMissed = io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      miss(loadWbIndex) := dcacheMissed
      pending(loadWbIndex) := io.loadIn(i).bits.mmio
      uop(loadWbIndex).debugInfo.issueTime := io.loadIn(i).bits.uop.debugInfo.issueTime
    }
  }

  when(io.dcache.valid) {
    XSDebug("miss resp: paddr:0x%x data %x\n", io.dcache.bits.addr, io.dcache.bits.data)
  }

  // Refill 64 bits in a cycle
  // Refill data comes back from io.dcache
  dataModule.io.refill.valid := io.dcache.valid
  dataModule.io.refill.paddr := io.dcache.bits.addr
  dataModule.io.refill.data := io.dcache.bits.data

  (0 until LoadQueueSize).map(i => {
    dataModule.io.refill.refillMask(i) := allocated(i) && miss(i)
    when(dataModule.io.refill.valid && dataModule.io.refill.refillMask(i) && dataModule.io.refill.matchMask(i)) {
      datavalid(i) := true.B
      miss(i) := false.B
    }
  })

  // Writeback up to 2 missed load insts to CDB
  //
  // Pick 2 missed loads (whose data has been refilled) and write them back to the CDB.
  // The 2 refilled loads are selected from even/odd entries separately.

  // Stage 0
  // Generate writeback indexes

  def getEvenBits(input: UInt): UInt = {
    require(input.getWidth == LoadQueueSize)
    VecInit((0 until LoadQueueSize/2).map(i => {input(2*i)})).asUInt
  }
  def getOddBits(input: UInt): UInt = {
    require(input.getWidth == LoadQueueSize)
    VecInit((0 until LoadQueueSize/2).map(i => {input(2*i+1)})).asUInt
  }
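
  // Example (illustration only, assuming LoadQueueSize = 64): getEvenBits collects
  // bits {0, 2, 4, ..., 62} and getOddBits collects bits {1, 3, 5, ..., 63}, each
  // yielding a 32-bit vector. Writeback port 0 therefore only ever selects even lq
  // entries and port 1 only odd entries, which is why the selected half-index is
  // re-expanded below with Cat(half, 0.U(1.W)) or Cat(half, 1.U(1.W)).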

  val loadWbSel = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W))) // index selected last cycle
  val loadWbSelV = Wire(Vec(LoadPipelineWidth, Bool())) // whether the index selected last cycle is valid

  val loadWbSelVec = VecInit((0 until LoadQueueSize).map(i => {
    allocated(i) && !writebacked(i) && datavalid(i)
  })).asUInt() // use UInt instead of Vec to reduce verilog lines
  val evenDeqMask = getEvenBits(deqMask)
  val oddDeqMask = getOddBits(deqMask)
  // generate lastCycleSelect mask
  val evenSelectMask = Mux(io.ldout(0).fire(), getEvenBits(UIntToOH(loadWbSel(0))), 0.U)
  val oddSelectMask = Mux(io.ldout(1).fire(), getOddBits(UIntToOH(loadWbSel(1))), 0.U)
  // generate real select vec
  val loadEvenSelVec = getEvenBits(loadWbSelVec) & ~evenSelectMask
  val loadOddSelVec = getOddBits(loadWbSelVec) & ~oddSelectMask

  def toVec(a: UInt): Vec[Bool] = {
    VecInit(a.asBools)
  }

  val loadWbSelGen = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W)))
  val loadWbSelVGen = Wire(Vec(LoadPipelineWidth, Bool()))
  loadWbSelGen(0) := Cat(getFirstOne(toVec(loadEvenSelVec), evenDeqMask), 0.U(1.W))
  loadWbSelVGen(0) := loadEvenSelVec.asUInt.orR
  loadWbSelGen(1) := Cat(getFirstOne(toVec(loadOddSelVec), oddDeqMask), 1.U(1.W))
  loadWbSelVGen(1) := loadOddSelVec.asUInt.orR

  (0 until LoadPipelineWidth).map(i => {
    loadWbSel(i) := RegNext(loadWbSelGen(i))
    loadWbSelV(i) := RegNext(loadWbSelVGen(i), init = false.B)
    when(io.ldout(i).fire()){
      // Mark them as writebacked, so they will not be selected in the next cycle
      writebacked(loadWbSel(i)) := true.B
    }
  })

  // Stage 1
  // Use indexes generated in cycle 0 to read data
  // writeback data to cdb
  (0 until LoadPipelineWidth).map(i => {
    // data select
    dataModule.io.wb.raddr(i) := loadWbSelGen(i)
    val rdata = dataModule.io.wb.rdata(i).data
    val seluop = uop(loadWbSel(i))
    val func = seluop.ctrl.fuOpType
    val raddr = dataModule.io.wb.rdata(i).paddr
    val rdataSel = LookupTree(raddr(2, 0), List(
      "b000".U -> rdata(63, 0),
      "b001".U -> rdata(63, 8),
      "b010".U -> rdata(63, 16),
      "b011".U -> rdata(63, 24),
      "b100".U -> rdata(63, 32),
      "b101".U -> rdata(63, 40),
      "b110".U -> rdata(63, 48),
      "b111".U -> rdata(63, 56)
    ))
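    // Example (illustration only): for an lb at a paddr ending in 3'b011, rdataSel
    // drops the 3 low bytes (rdata(63, 24)) so the addressed byte sits at bit
    // positions 7..0 before rdataHelper sign/zero-extends it.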
    val rdataPartialLoad = rdataHelper(seluop, rdataSel)

    // writeback missed int/fp load
    //
    // Int load writeback will finish (if not blocked) in one cycle
    io.ldout(i).bits.uop := seluop
    io.ldout(i).bits.uop.lqIdx := loadWbSel(i).asTypeOf(new LqPtr)
    io.ldout(i).bits.data := rdataPartialLoad
    io.ldout(i).bits.redirectValid := false.B
    io.ldout(i).bits.redirect := DontCare
    io.ldout(i).bits.brUpdate := DontCare
    io.ldout(i).bits.debug.isMMIO := debug_mmio(loadWbSel(i))
    io.ldout(i).bits.debug.isPerfCnt := false.B
    io.ldout(i).bits.fflags := DontCare
    io.ldout(i).valid := loadWbSelV(i)

    when(io.ldout(i).fire()) {
      XSInfo("int load miss write to cdb roqidx %d lqidx %d pc 0x%x mmio %x\n",
        io.ldout(i).bits.uop.roqIdx.asUInt,
        io.ldout(i).bits.uop.lqIdx.asUInt,
        io.ldout(i).bits.uop.cf.pc,
        debug_mmio(loadWbSel(i))
      )
    }

  })

  /**
    * Load commits
    *
    * When a load commits, mark it as !allocated and move deqPtrExt forward.
    */
  (0 until CommitWidth).map(i => {
    when(loadCommit(i)) {
      allocated(mcommitIdx(i)) := false.B
      XSDebug("load commit %d: idx %d %x\n", i.U, mcommitIdx(i), uop(mcommitIdx(i)).cf.pc)
    }
  })

  def getFirstOne(mask: Vec[Bool], startMask: UInt) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    PriorityEncoder(Mux(highBitsUint.orR(), highBitsUint, mask.asUInt))
  }
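
  // Worked example of getFirstOne (illustration only, an 8-entry mask for brevity):
  // with mask = b0010_0100 and startMask = UIntToMask(5, 8) = b0001_1111, highBits
  // keeps only entries at index >= 5, i.e. b0010_0000, so index 5 is chosen.
  // If bit 5 were not set, nothing would survive above the start pointer and the
  // priority encoder would fall back to the full mask, picking the wrapped-around
  // index 2 instead.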

  def getOldestInTwo(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    assert(valid.length == 2)
    Mux(valid(0) && valid(1),
      Mux(isAfter(uop(0).roqIdx, uop(1).roqIdx), uop(1), uop(0)),
      Mux(valid(0) && !valid(1), uop(0), uop(1)))
  }

  def getAfterMask(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    val length = valid.length
    (0 until length).map(i => {
      (0 until length).map(j => {
        Mux(valid(i) && valid(j),
          isAfter(uop(i).roqIdx, uop(j).roqIdx),
          Mux(!valid(i), true.B, false.B))
      })
    })
  }
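
  // Worked example of getAfterMask (illustration only): for the three rollback
  // candidates (lq, wb, l1) it builds a 3x3 matrix where mask(i)(j) means
  // "candidate i is younger than candidate j, or candidate i is invalid".
  // E.g. if only the wb and l1 candidates are valid and wb is older, then
  // mask(2)(1) = true and mask(1)(2) = false, so the Mux chain in detectRollback
  // below picks rollbackUopVec(1), i.e. the oldest valid candidate.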

  /**
    * Memory violation detection
    *
    * When a store writes back, it searches the LoadQueue for younger load instructions
    * with the same physical address. Those loads got wrong data and need re-execution.
    *
    * Cycle 0: Store Writeback
    *   Generate a match vector for the store address with rangeMask(stPtr, enqPtr).
    *   Besides, load instructions in LoadUnit_S1 and S2 are also checked.
    * Cycle 1: Redirect Generation
    *   There are three possible sources of violation. Choose the oldest load.
    *   Prepare the redirect request according to the detected violation.
    * Cycle 2: Redirect Fire
    *   Fire the redirect request (if valid)
    */
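
  // Worked example of the range check used below (illustration only, 8 entries for
  // brevity): rangeMask(stPtr, enqPtr) is built as UIntToMask(stPtr) ^ UIntToMask(enqPtr),
  // inverted when the two pointers sit in different laps (different flags).
  // E.g. stPtr.value = 2, enqPtr.value = 5, same flag:
  //   UIntToMask(2, 8) = b0000_0011, UIntToMask(5, 8) = b0001_1111,
  //   xor              = b0001_1100
  // so only entries 2, 3 and 4 (loads allocated after the store, up to but not
  // including enqPtr) are checked for a violation.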
  io.load_s1 := DontCare
  def detectRollback(i: Int) = {
    val startIndex = io.storeIn(i).bits.uop.lqIdx.value
    val lqIdxMask = UIntToMask(startIndex, LoadQueueSize)
    val xorMask = lqIdxMask ^ enqMask
    val sameFlag = io.storeIn(i).bits.uop.lqIdx.flag === enqPtrExt(0).flag
    val toEnqPtrMask = Mux(sameFlag, xorMask, ~xorMask)

    // check if a load already in the lq needs to be rolled back
    dataModule.io.violation(i).paddr := io.storeIn(i).bits.paddr
    dataModule.io.violation(i).mask := io.storeIn(i).bits.mask
    val addrMaskMatch = RegNext(dataModule.io.violation(i).violationMask)
    val entryNeedCheck = RegNext(VecInit((0 until LoadQueueSize).map(j => {
      allocated(j) && toEnqPtrMask(j) && (datavalid(j) || miss(j))
    })))
    val lqViolationVec = VecInit((0 until LoadQueueSize).map(j => {
      addrMaskMatch(j) && entryNeedCheck(j)
    }))
    val lqViolation = lqViolationVec.asUInt().orR()
    val lqViolationIndex = getFirstOne(lqViolationVec, RegNext(lqIdxMask))
    val lqViolationUop = uop(lqViolationIndex)
    // lqViolationUop.lqIdx.flag := deqMask(lqViolationIndex) ^ deqPtrExt.flag
    // lqViolationUop.lqIdx.value := lqViolationIndex
    XSDebug(lqViolation, p"${Binary(Cat(lqViolationVec))}, $startIndex, $lqViolationIndex\n")

    // when load/store write back to roq in the same cycle, check if rollback is needed
    val wbViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.loadIn(j).valid &&
        isAfter(io.loadIn(j).bits.uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.loadIn(j).bits.paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.loadIn(j).bits.mask).orR
    })))
    val wbViolation = wbViolationVec.asUInt().orR()
    val wbViolationUop = getOldestInTwo(wbViolationVec, RegNext(VecInit(io.loadIn.map(_.bits.uop))))
    XSDebug(wbViolation, p"${Binary(Cat(wbViolationVec))}, $wbViolationUop\n")

    // check if rollback is needed for loads in LoadUnit_S1
    val l1ViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.load_s1(j).valid && // L1 valid
        isAfter(io.load_s1(j).uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.load_s1(j).paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.load_s1(j).mask).orR
    })))
    val l1Violation = l1ViolationVec.asUInt().orR()
    val l1ViolationUop = getOldestInTwo(l1ViolationVec, RegNext(VecInit(io.load_s1.map(_.uop))))
    XSDebug(l1Violation, p"${Binary(Cat(l1ViolationVec))}, $l1ViolationUop\n")

    val rollbackValidVec = Seq(lqViolation, wbViolation, l1Violation)
    val rollbackUopVec = Seq(lqViolationUop, wbViolationUop, l1ViolationUop)

    val mask = getAfterMask(rollbackValidVec, rollbackUopVec)
    val oneAfterZero = mask(1)(0)
    val rollbackUop = Mux(oneAfterZero && mask(2)(0),
      rollbackUopVec(0),
      Mux(!oneAfterZero && mask(2)(1), rollbackUopVec(1), rollbackUopVec(2)))

    XSDebug(
      l1Violation,
      "need rollback (l1 load) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, l1ViolationUop.roqIdx.asUInt
    )
    XSDebug(
      lqViolation,
      "need rollback (ld wb before store) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, lqViolationUop.roqIdx.asUInt
    )
    XSDebug(
      wbViolation,
      "need rollback (ld/st wb together) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, wbViolationUop.roqIdx.asUInt
    )

    (RegNext(io.storeIn(i).valid) && Cat(rollbackValidVec).orR, rollbackUop)
  }

  // rollback check
  val rollback = Wire(Vec(StorePipelineWidth, Valid(new MicroOp)))
  for (i <- 0 until StorePipelineWidth) {
    val detectedRollback = detectRollback(i)
    rollback(i).valid := detectedRollback._1
    rollback(i).bits := detectedRollback._2
  }

  def rollbackSel(a: Valid[MicroOp], b: Valid[MicroOp]): ValidIO[MicroOp] = {
    Mux(
      a.valid,
      Mux(
        b.valid,
        Mux(isAfter(a.bits.roqIdx, b.bits.roqIdx), b, a), // a, b both valid, select the older one
        a // sel a
      ),
      b // sel b
    )
  }

  val rollbackSelected = ParallelOperation(rollback, rollbackSel)
  val lastCycleRedirect = RegNext(io.brqRedirect)
  val lastCycleFlush = RegNext(io.flush)

  // S2: select rollback and generate rollback request
  // Note that we use roqIdx - 1.U to flush the load instruction itself.
  // Thus, here if last cycle's roqIdx equals this cycle's roqIdx, it still triggers the redirect.
  val rollbackGen = Wire(Valid(new Redirect))
  val rollbackReg = Reg(Valid(new Redirect))
  rollbackGen.valid := rollbackSelected.valid &&
    (!lastCycleRedirect.valid || !isAfter(rollbackSelected.bits.roqIdx, lastCycleRedirect.bits.roqIdx)) &&
    !lastCycleFlush

  rollbackGen.bits.roqIdx := rollbackSelected.bits.roqIdx
  rollbackGen.bits.level := RedirectLevel.flush
  rollbackGen.bits.interrupt := DontCare
  rollbackGen.bits.pc := DontCare
  rollbackGen.bits.target := rollbackSelected.bits.cf.pc
  rollbackGen.bits.brTag := rollbackSelected.bits.brTag

  rollbackReg := rollbackGen

  // S3: fire rollback request
  io.rollback := rollbackReg
  io.rollback.valid := rollbackReg.valid &&
    (!lastCycleRedirect.valid || !isAfter(rollbackReg.bits.roqIdx, lastCycleRedirect.bits.roqIdx)) &&
    !lastCycleFlush

  when(io.rollback.valid) {
    XSDebug("Mem rollback: pc %x roqidx %d\n", io.rollback.bits.pc, io.rollback.bits.roqIdx.asUInt)
  }

  /**
    * Memory mapped IO / other uncached operations
    *
    */
  io.uncache.req.valid := pending(deqPtr) && allocated(deqPtr) &&
    io.commits.info(0).commitType === CommitType.LOAD &&
    io.roqDeqPtr === uop(deqPtr).roqIdx &&
    !io.commits.isWalk

  dataModule.io.uncache.raddr := deqPtrExtNext.value

  io.uncache.req.bits.cmd  := MemoryOpConstants.M_XRD
  io.uncache.req.bits.addr := dataModule.io.uncache.rdata.paddr
  io.uncache.req.bits.data := dataModule.io.uncache.rdata.data
  io.uncache.req.bits.mask := dataModule.io.uncache.rdata.mask

  io.uncache.req.bits.meta.id       := DontCare
  io.uncache.req.bits.meta.vaddr    := DontCare
  io.uncache.req.bits.meta.paddr    := dataModule.io.uncache.rdata.paddr
  io.uncache.req.bits.meta.uop      := uop(deqPtr)
  io.uncache.req.bits.meta.mmio     := true.B
  io.uncache.req.bits.meta.tlb_miss := false.B
  io.uncache.req.bits.meta.mask     := dataModule.io.uncache.rdata.mask
  io.uncache.req.bits.meta.replay   := false.B

  io.uncache.resp.ready := true.B

  when (io.uncache.req.fire()) {
    pending(deqPtr) := false.B

    XSDebug("uncache req: pc %x addr %x data %x op %x mask %x\n",
      uop(deqPtr).cf.pc,
      io.uncache.req.bits.addr,
      io.uncache.req.bits.data,
      io.uncache.req.bits.cmd,
      io.uncache.req.bits.mask
    )
  }

  dataModule.io.uncache.wen := false.B
  when(io.uncache.resp.fire()){
    datavalid(deqPtr) := true.B
    dataModule.io.uncacheWrite(deqPtr, io.uncache.resp.bits.data(XLEN-1, 0))
    dataModule.io.uncache.wen := true.B

    XSDebug("uncache resp: data %x\n", io.uncache.resp.bits.data)
  }

  // Read vaddr for mem exception
  vaddrModule.io.raddr(0) := io.exceptionAddr.lsIdx.lqIdx.value
  io.exceptionAddr.vaddr := vaddrModule.io.rdata(0)

  // misprediction recovery / exception redirect
  // invalidate lq entries using roqIdx
  val needCancel = Wire(Vec(LoadQueueSize, Bool()))
  for (i <- 0 until LoadQueueSize) {
    needCancel(i) := uop(i).roqIdx.needFlush(io.brqRedirect, io.flush) && allocated(i) && !commited(i)
    when (needCancel(i)) {
      allocated(i) := false.B
    }
  }

  /**
    * update pointers
    */
  val lastCycleCancelCount = PopCount(RegNext(needCancel))
  // when io.brqRedirect.valid, we don't allow enqueue even though it may fire.
  val enqNumber = Mux(io.enq.canAccept && io.enq.sqCanAccept && !(io.brqRedirect.valid || io.flush), PopCount(io.enq.req.map(_.valid)), 0.U)
  when (lastCycleRedirect.valid || lastCycleFlush) {
    // we recover the pointers in the next cycle after redirect
    enqPtrExt := VecInit(enqPtrExt.map(_ - lastCycleCancelCount))
  }.otherwise {
    enqPtrExt := VecInit(enqPtrExt.map(_ + enqNumber))
  }

  val commitCount = PopCount(loadCommit)
  deqPtrExtNext := deqPtrExt + commitCount
  deqPtrExt := deqPtrExtNext

  val lastLastCycleRedirect = RegNext(lastCycleRedirect.valid)
  val trueValidCounter = distanceBetween(enqPtrExt(0), deqPtrExt)
  validCounter := Mux(lastLastCycleRedirect,
    trueValidCounter,
    validCounter + enqNumber - commitCount
  )

  allowEnqueue := Mux(io.brqRedirect.valid || io.flush,
    false.B,
    Mux(lastLastCycleRedirect,
      trueValidCounter <= (LoadQueueSize - RenameWidth).U,
      validCounter + enqNumber <= (LoadQueueSize - RenameWidth).U
    )
  )
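
  // Worked example of the enqueue guard above (illustration only, assuming
  // LoadQueueSize = 64 and RenameWidth = 6): allowEnqueue stays true as long as
  // validCounter + enqNumber <= 58, i.e. at least RenameWidth entries remain free
  // for the next dispatch group. Because the register-based counter may be stale
  // right after a flush, trueValidCounter (the distance between enqPtrExt(0) and
  // deqPtrExt) is used instead in the cycle when lastLastCycleRedirect is set.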

  // debug info
  XSDebug("enqPtrExt %d:%d deqPtrExt %d:%d\n", enqPtrExt(0).flag, enqPtr, deqPtrExt.flag, deqPtr)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when(flag) {
      XSDebug(false, true.B, name)
    }.otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  for (i <- 0 until LoadQueueSize) {
    if (i % 4 == 0) XSDebug("")
    XSDebug(false, true.B, "%x [%x] ", uop(i).cf.pc, dataModule.io.debug(i).paddr)
    PrintFlag(allocated(i), "a")
    PrintFlag(allocated(i) && datavalid(i), "v")
    PrintFlag(allocated(i) && writebacked(i), "w")
    PrintFlag(allocated(i) && commited(i), "c")
    PrintFlag(allocated(i) && miss(i), "m")
    // PrintFlag(allocated(i) && listening(i), "l")
    PrintFlag(allocated(i) && pending(i), "p")
    XSDebug(false, true.B, " ")
    if (i % 4 == 3 || i == LoadQueueSize - 1) XSDebug(false, true.B, "\n")
  }

}