xref: /XiangShan/src/main/scala/xiangshan/mem/lsqueue/LoadQueue.scala (revision 5c5bd416ce761d956348a8e2fbbf268922371d8b)
package xiangshan.mem

import chisel3._
import chisel3.util._
import freechips.rocketchip.tile.HasFPUParameters
import utils._
import xiangshan._
import xiangshan.cache._
import xiangshan.cache.{DCacheLineIO, DCacheWordIO, MemoryOpConstants, TlbRequestIO}
import xiangshan.backend.LSUOpType
import xiangshan.mem._
import xiangshan.backend.roq.RoqLsqIO
import xiangshan.backend.fu.HasExceptionNO

class LqPtr extends CircularQueuePtr(LqPtr.LoadQueueSize) { }

object LqPtr extends HasXSParameter {
  def apply(f: Bool, v: UInt): LqPtr = {
    val ptr = Wire(new LqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}
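
// A LqPtr is a circular queue pointer: `value` indexes into the queue while
// `flag` flips on every wrap-around, so "full" and "empty" can be told apart.
// Illustrative example (values not from the original source, assuming
// LoadQueueSize = 64): enq = (flag 1, value 3) and deq = (flag 0, value 60)
// are 64 - 60 + 3 = 7 entries apart, wrapping across the end of the queue.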

trait HasFpLoadHelper { this: HasFPUParameters =>
  def fpRdataHelper(uop: MicroOp, rdata: UInt): UInt = {
    LookupTree(uop.ctrl.fuOpType, List(
      LSUOpType.lw   -> recode(rdata(31, 0), S),
      LSUOpType.ld   -> recode(rdata(63, 0), D)
    ))
  }
}

trait HasLoadHelper { this: XSModule =>
  def rdataHelper(uop: MicroOp, rdata: UInt): UInt = {
    val fpWen = uop.ctrl.fpWen
    LookupTree(uop.ctrl.fuOpType, List(
      LSUOpType.lb   -> SignExt(rdata(7, 0) , XLEN),
      LSUOpType.lh   -> SignExt(rdata(15, 0), XLEN),
      LSUOpType.lw   -> Mux(fpWen, Cat(Fill(32, 1.U(1.W)), rdata(31, 0)), SignExt(rdata(31, 0), XLEN)),
      LSUOpType.ld   -> Mux(fpWen, rdata, SignExt(rdata(63, 0), XLEN)),
      LSUOpType.lbu  -> ZeroExt(rdata(7, 0) , XLEN),
      LSUOpType.lhu  -> ZeroExt(rdata(15, 0), XLEN),
      LSUOpType.lwu  -> ZeroExt(rdata(31, 0), XLEN),
    ))
  }
}
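
// Illustrative example of rdataHelper (values not from the original source):
// for LSUOpType.lb with rdata(7, 0) = 0x80, SignExt gives 0xffff_ffff_ffff_ff80,
// while LSUOpType.lbu gives 0x0000_0000_0000_0080. For an fp lw (fpWen set),
// the upper 32 bits are filled with 1s (NaN-boxing) instead of sign bits.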

class LqEnqIO extends XSBundle {
  val canAccept = Output(Bool())
  val sqCanAccept = Input(Bool())
  val needAlloc = Vec(RenameWidth, Input(Bool()))
  val req = Vec(RenameWidth, Flipped(ValidIO(new MicroOp)))
  val resp = Vec(RenameWidth, Output(new LqPtr))
}

// Load Queue
class LoadQueue extends XSModule
  with HasDCacheParameters
  with HasCircularQueuePtrHelper
  with HasLoadHelper
  with HasExceptionNO
{
  val io = IO(new Bundle() {
    val enq = new LqEnqIO
    val brqRedirect = Flipped(ValidIO(new Redirect))
    val flush = Input(Bool())
    val loadIn = Vec(LoadPipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val loadDataForwarded = Vec(LoadPipelineWidth, Input(Bool()))
    val needReplayFromRS = Vec(LoadPipelineWidth, Input(Bool()))
    val ldout = Vec(2, DecoupledIO(new ExuOutput)) // writeback int load
    val load_s1 = Vec(LoadPipelineWidth, Flipped(new MaskedLoadForwardQueryIO))
    val roq = Flipped(new RoqLsqIO)
    val rollback = Output(Valid(new Redirect)) // replay now starts from load instead of store
    val dcache = Flipped(ValidIO(new Refill))
    val uncache = new DCacheWordIO
    val exceptionAddr = new ExceptionAddrIO
  })

  val uop = Reg(Vec(LoadQueueSize, new MicroOp))
  // val data = Reg(Vec(LoadQueueSize, new LsRoqEntry))
  val dataModule = Module(new LoadQueueData(LoadQueueSize, wbNumRead = LoadPipelineWidth, wbNumWrite = LoadPipelineWidth))
  dataModule.io := DontCare
  val vaddrModule = Module(new SyncDataModuleTemplate(UInt(VAddrBits.W), LoadQueueSize, numRead = 1, numWrite = LoadPipelineWidth))
  vaddrModule.io := DontCare
  val allocated = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // lq entry has been allocated
  val datavalid = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // data is valid
  val writebacked = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // inst has been written back to the CDB
  val miss = Reg(Vec(LoadQueueSize, Bool())) // load inst missed, waiting for miss queue to accept miss request
  // val listening = Reg(Vec(LoadQueueSize, Bool())) // waiting for refill result
  val pending = Reg(Vec(LoadQueueSize, Bool())) // mmio pending: inst is an mmio inst, it will not be executed until it reaches the head of the ROB

  val debug_mmio = Reg(Vec(LoadQueueSize, Bool())) // debug: inst is an mmio inst
  val debug_paddr = Reg(Vec(LoadQueueSize, UInt(PAddrBits.W))) // debug: paddr of the inst

  val enqPtrExt = RegInit(VecInit((0 until RenameWidth).map(_.U.asTypeOf(new LqPtr))))
  val deqPtrExt = RegInit(0.U.asTypeOf(new LqPtr))
  val deqPtrExtNext = Wire(new LqPtr)
  val allowEnqueue = RegInit(true.B)

  val enqPtr = enqPtrExt(0).value
  val deqPtr = deqPtrExt.value

  val deqMask = UIntToMask(deqPtr, LoadQueueSize)
  val enqMask = UIntToMask(enqPtr, LoadQueueSize)
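
  // UIntToMask(ptr, size) sets exactly the bits below ptr. Illustrative example
  // (assuming LoadQueueSize = 8): deqPtr = 3 gives deqMask = 0b0000_0111. These
  // masks make wrap-aware range checks cheap: xor-ing two of them marks the
  // entries lying between the two pointers (used in detectRollback below).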

  val commitCount = RegNext(io.roq.lcommit)

  /**
    * Enqueue at dispatch
    *
    * Currently, LoadQueue only allows enqueue when #emptyEntries > RenameWidth (a.k.a. EnqWidth)
    */
  io.enq.canAccept := allowEnqueue

  for (i <- 0 until RenameWidth) {
    val offset = if (i == 0) 0.U else PopCount(io.enq.needAlloc.take(i))
    val lqIdx = enqPtrExt(offset)
    val index = lqIdx.value
    when (io.enq.req(i).valid && io.enq.canAccept && io.enq.sqCanAccept && !(io.brqRedirect.valid || io.flush)) {
      uop(index) := io.enq.req(i).bits
      allocated(index) := true.B
      datavalid(index) := false.B
      writebacked(index) := false.B
      miss(index) := false.B
      // listening(index) := false.B
      pending(index) := false.B
    }
    io.enq.resp(i) := lqIdx
  }
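
  // Illustrative example of the offset computation above (assuming
  // RenameWidth = 6): if only slots 1, 3 and 4 of needAlloc are set, those
  // slots get offsets 0, 1 and 2, i.e. each load claims the next free entry
  // after the entries claimed by earlier slots in the same rename group.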
  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n")

  /**
    * Writeback load from load units
    *
    * Most load instructions write back to the regfile at the same time.
    * However,
    *   (1) an mmio instruction with exceptions writes back to the ROB immediately;
    *   (2) an mmio instruction without exceptions does not write back yet.
    *       The mmio instruction will be sent to the lower level when it reaches the ROB's head.
    *       After the uncache response, it will write back through the arbiter with loadUnit;
    *   (3) a load that misses in the cache is marked as miss and sent to dcache later.
    *       After the cache refill, it will write back through the arbiter with loadUnit.
    */
  for (i <- 0 until LoadPipelineWidth) {
    dataModule.io.wb.wen(i) := false.B
    val loadWbIndex = io.loadIn(i).bits.uop.lqIdx.value
    when(io.loadIn(i).fire()) {
      when(io.loadIn(i).bits.miss) {
        XSInfo(io.loadIn(i).valid, "load miss write to lq idx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio
        )
      }.otherwise {
        XSInfo(io.loadIn(i).valid, "load hit write to CDB lqidx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio
        )
      }
      datavalid(loadWbIndex) := (!io.loadIn(i).bits.miss || io.loadDataForwarded(i)) &&
        !io.loadIn(i).bits.mmio && // mmio data is not valid until we finish the uncache access
        !io.needReplayFromRS(i) // do not write back if the inst will be resent from rs
      writebacked(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio

      val loadWbData = Wire(new LQDataEntry)
      loadWbData.paddr := io.loadIn(i).bits.paddr
      loadWbData.mask := io.loadIn(i).bits.mask
      loadWbData.data := io.loadIn(i).bits.forwardData.asUInt // fwd data
      loadWbData.fwdMask := io.loadIn(i).bits.forwardMask
      dataModule.io.wbWrite(i, loadWbIndex, loadWbData)
      dataModule.io.wb.wen(i) := true.B

      debug_mmio(loadWbIndex) := io.loadIn(i).bits.mmio
      debug_paddr(loadWbIndex) := io.loadIn(i).bits.paddr

      val dcacheMissed = io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      miss(loadWbIndex) := dcacheMissed && !io.loadDataForwarded(i) && !io.needReplayFromRS(i)
      pending(loadWbIndex) := io.loadIn(i).bits.mmio
      uop(loadWbIndex).debugInfo.issueTime := io.loadIn(i).bits.uop.debugInfo.issueTime
    }
    // vaddrModule write is delayed, as vaddrModule will not be read right after write
    vaddrModule.io.waddr(i) := RegNext(loadWbIndex)
    vaddrModule.io.wdata(i) := RegNext(io.loadIn(i).bits.vaddr)
    vaddrModule.io.wen(i) := RegNext(io.loadIn(i).fire())
  }

  when(io.dcache.valid) {
    XSDebug("miss resp: paddr:0x%x data %x\n", io.dcache.bits.addr, io.dcache.bits.data)
  }

  // Refill 64 bits in a cycle
  // Refill data comes back from io.dcache.resp
  dataModule.io.refill.valid := io.dcache.valid
  dataModule.io.refill.paddr := io.dcache.bits.addr
  dataModule.io.refill.data := io.dcache.bits.data

  (0 until LoadQueueSize).map(i => {
    dataModule.io.refill.refillMask(i) := allocated(i) && miss(i)
    when(dataModule.io.refill.valid && dataModule.io.refill.refillMask(i) && dataModule.io.refill.matchMask(i)) {
      datavalid(i) := true.B
      miss(i) := false.B
    }
  })
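
  // A refill wakes up every allocated entry still waiting on a miss
  // (refillMask) whose recorded paddr matches the refilled line (matchMask,
  // computed inside dataModule). Those entries become datavalid and clear miss
  // in the same cycle, making them eligible for writeback selection next cycle.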

  // Writeback up to 2 missed load insts to CDB
  //
  // Pick 2 missed loads (data refilled) and write them back to the CDB.
  // The 2 refilled loads are selected from even/odd entries, separately.

  // Stage 0
  // Generate writeback indexes

  def getEvenBits(input: UInt): UInt = {
    require(input.getWidth == LoadQueueSize)
    VecInit((0 until LoadQueueSize/2).map(i => {input(2*i)})).asUInt
  }
  def getOddBits(input: UInt): UInt = {
    require(input.getWidth == LoadQueueSize)
    VecInit((0 until LoadQueueSize/2).map(i => {input(2*i+1)})).asUInt
  }
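
  // Illustrative example (assuming LoadQueueSize = 8): for input = 0b1011_0100,
  // getEvenBits collects bits 0,2,4,6 -> 0b0110 and getOddBits collects bits
  // 1,3,5,7 -> 0b1100. Splitting the queue this way gives each writeback port
  // its own, half-width select tree.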

  val loadWbSel = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W))) // index selected last cycle
  val loadWbSelV = Wire(Vec(LoadPipelineWidth, Bool())) // index selected in last cycle is valid

  val loadWbSelVec = VecInit((0 until LoadQueueSize).map(i => {
    allocated(i) && !writebacked(i) && datavalid(i)
  })).asUInt() // use UInt instead of Vec to reduce Verilog lines
  val evenDeqMask = getEvenBits(deqMask)
  val oddDeqMask = getOddBits(deqMask)
  // generate lastCycleSelect mask
  val evenSelectMask = Mux(io.ldout(0).fire(), getEvenBits(UIntToOH(loadWbSel(0))), 0.U)
  val oddSelectMask = Mux(io.ldout(1).fire(), getOddBits(UIntToOH(loadWbSel(1))), 0.U)
  // generate real select vec
  val loadEvenSelVec = getEvenBits(loadWbSelVec) & ~evenSelectMask
  val loadOddSelVec = getOddBits(loadWbSelVec) & ~oddSelectMask
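
  // The entry selected last cycle fires on io.ldout this cycle, but its
  // writebacked bit only becomes visible next cycle. Masking it out of the
  // select vector here keeps the same entry from being selected twice.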

  def toVec(a: UInt): Vec[Bool] = {
    VecInit(a.asBools)
  }

  val loadWbSelGen = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W)))
  val loadWbSelVGen = Wire(Vec(LoadPipelineWidth, Bool()))
  loadWbSelGen(0) := Cat(getFirstOne(toVec(loadEvenSelVec), evenDeqMask), 0.U(1.W))
  loadWbSelVGen(0) := loadEvenSelVec.asUInt.orR
  loadWbSelGen(1) := Cat(getFirstOne(toVec(loadOddSelVec), oddDeqMask), 1.U(1.W))
  loadWbSelVGen(1) := loadOddSelVec.asUInt.orR

  (0 until LoadPipelineWidth).map(i => {
    loadWbSel(i) := RegNext(loadWbSelGen(i))
    loadWbSelV(i) := RegNext(loadWbSelVGen(i), init = false.B)
    when(io.ldout(i).fire()){
      // Mark them as writebacked, so they will not be selected in the next cycle
      writebacked(loadWbSel(i)) := true.B
    }
  })

  // Stage 1
  // Use indexes generated in cycle 0 to read data
  // and write back data to the CDB
  (0 until LoadPipelineWidth).map(i => {
    // data select
    dataModule.io.wb.raddr(i) := loadWbSelGen(i)
    val rdata = dataModule.io.wb.rdata(i).data
    val seluop = uop(loadWbSel(i))
    val func = seluop.ctrl.fuOpType
    val raddr = dataModule.io.wb.rdata(i).paddr
    val rdataSel = LookupTree(raddr(2, 0), List(
      "b000".U -> rdata(63, 0),
      "b001".U -> rdata(63, 8),
      "b010".U -> rdata(63, 16),
      "b011".U -> rdata(63, 24),
      "b100".U -> rdata(63, 32),
      "b101".U -> rdata(63, 40),
      "b110".U -> rdata(63, 48),
      "b111".U -> rdata(63, 56)
    ))
    val rdataPartialLoad = rdataHelper(seluop, rdataSel)
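
    // Queue data is kept 8-byte aligned; the LookupTree above shifts the
    // accessed bytes down to bit 0 by paddr(2, 0). E.g. for a lw whose
    // paddr(2, 0) = 4, rdata(63, 32) is selected, and rdataHelper then
    // sign-/zero-extends (or NaN-boxes) the low 32 bits.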

    // writeback missed int/fp load
    //
    // Int load writeback will finish (if not blocked) in one cycle
    io.ldout(i).bits.uop := seluop
    io.ldout(i).bits.uop.lqIdx := loadWbSel(i).asTypeOf(new LqPtr)
    io.ldout(i).bits.data := rdataPartialLoad
    io.ldout(i).bits.redirectValid := false.B
    io.ldout(i).bits.redirect := DontCare
    io.ldout(i).bits.debug.isMMIO := debug_mmio(loadWbSel(i))
    io.ldout(i).bits.debug.isPerfCnt := false.B
    io.ldout(i).bits.debug.paddr := debug_paddr(loadWbSel(i))
    io.ldout(i).bits.fflags := DontCare
    io.ldout(i).valid := loadWbSelV(i)

    when(io.ldout(i).fire()) {
      XSInfo("int load miss write to CDB roqidx %d lqidx %d pc 0x%x mmio %x\n",
        io.ldout(i).bits.uop.roqIdx.asUInt,
        io.ldout(i).bits.uop.lqIdx.asUInt,
        io.ldout(i).bits.uop.cf.pc,
        debug_mmio(loadWbSel(i))
      )
    }
  })

  /**
    * Load commits
    *
    * When a load commits, mark it as !allocated and move deqPtrExt forward.
    */
  (0 until CommitWidth).map(i => {
    when(commitCount > i.U){
      allocated(deqPtr+i.U) := false.B
    }
  })

  def getFirstOne(mask: Vec[Bool], startMask: UInt) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    PriorityEncoder(Mux(highBitsUint.orR(), highBitsUint, mask.asUInt))
  }
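
  // getFirstOne returns the index of the first set bit at or above the start
  // position, wrapping around when there is none. Illustrative example
  // (8 entries): mask = 0b0010_0100 with startMask = 0b0000_0111 (start = 3)
  // leaves highBits = 0b0010_0000, so index 5 wins; for mask = 0b0000_0100 the
  // high part is empty and the search wraps around to index 2.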

  def getOldestInTwo(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    assert(valid.length == 2)
    Mux(valid(0) && valid(1),
      Mux(isAfter(uop(0).roqIdx, uop(1).roqIdx), uop(1), uop(0)),
      Mux(valid(0) && !valid(1), uop(0), uop(1)))
  }

  def getAfterMask(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    val length = valid.length
    (0 until length).map(i => {
      (0 until length).map(j => {
        Mux(valid(i) && valid(j),
          isAfter(uop(i).roqIdx, uop(j).roqIdx),
          Mux(!valid(i), true.B, false.B))
      })
    })
  }
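
  // getAfterMask builds a pairwise age matrix: mask(i)(j) is true when uop(i)
  // is younger than uop(j), and an invalid uop(i) counts as younger than
  // everything so it can never win the "oldest" comparison. It is used below
  // to pick the oldest of the three rollback candidates in parallel.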

  /**
    * Memory violation detection
    *
    * When a store writes back, it searches the LoadQueue for younger load instructions
    * with the same load physical address. They loaded wrong data and need re-execution.
    *
    * Cycle 0: Store Writeback
    *   Generate match vector for store address with rangeMask(stPtr, enqPtr).
    *   Besides, load instructions in LoadUnit_S1 and S2 are also checked.
    * Cycle 1: Redirect Generation
    *   There are three possible types of violations, up to 6 possible redirect requests.
    *   Choose the oldest load (part 1). (4 + 2) -> (1 + 2)
    * Cycle 2: Redirect Fire
    *   Choose the oldest load (part 2). (3 -> 1)
    *   Prepare redirect request according to the detected violation.
    *   Fire redirect request (if valid)
    */

  // stage 0:        lq l1 wb     l1 wb lq
  //                 |  |  |      |  |  |  (paddr match)
  // stage 1:        lq l1 wb     l1 wb lq
  //                 |  |  |      |  |  |
  //                 |  |------------|  |
  //                 |        |         |
  // stage 2:        lq      l1wb       lq
  //                 |        |         |
  //                 --------------------
  //                          |
  //                      rollback req
  io.load_s1 := DontCare
  def detectRollback(i: Int) = {
    val startIndex = io.storeIn(i).bits.uop.lqIdx.value
    val lqIdxMask = UIntToMask(startIndex, LoadQueueSize)
    val xorMask = lqIdxMask ^ enqMask
    val sameFlag = io.storeIn(i).bits.uop.lqIdx.flag === enqPtrExt(0).flag
    val toEnqPtrMask = Mux(sameFlag, xorMask, ~xorMask)
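
    // toEnqPtrMask marks the entries between the store's lqIdx and enqPtr,
    // i.e. the loads younger than the store. lqIdxMask ^ enqMask covers that
    // range directly; when the two pointers are on different laps (flags
    // differ) the range wraps and the complement is taken instead.
    // Illustrative example (8 entries, same flag): startIndex = 2, enqPtr = 5
    // -> 0b0000_0011 ^ 0b0001_1111 = 0b0001_1100 (entries 2, 3, 4).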

    // check if a load already in lq needs to be rolled back
    dataModule.io.violation(i).paddr := io.storeIn(i).bits.paddr
    dataModule.io.violation(i).mask := io.storeIn(i).bits.mask
    val addrMaskMatch = RegNext(dataModule.io.violation(i).violationMask)
    val entryNeedCheck = RegNext(VecInit((0 until LoadQueueSize).map(j => {
      allocated(j) && toEnqPtrMask(j) && (datavalid(j) || miss(j))
    })))
    val lqViolationVec = VecInit((0 until LoadQueueSize).map(j => {
      addrMaskMatch(j) && entryNeedCheck(j)
    }))
    val lqViolation = lqViolationVec.asUInt().orR()
    val lqViolationIndex = getFirstOne(lqViolationVec, RegNext(lqIdxMask))
    val lqViolationUop = uop(lqViolationIndex)
    // lqViolationUop.lqIdx.flag := deqMask(lqViolationIndex) ^ deqPtrExt.flag
    // lqViolationUop.lqIdx.value := lqViolationIndex
    XSDebug(lqViolation, p"${Binary(Cat(lqViolationVec))}, $startIndex, $lqViolationIndex\n")

    // when a load and a store write back to the ROB together, check if rollback is needed
    val wbViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.loadIn(j).valid &&
        isAfter(io.loadIn(j).bits.uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.loadIn(j).bits.paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.loadIn(j).bits.mask).orR
    })))
    val wbViolation = wbViolationVec.asUInt().orR()
    val wbViolationUop = getOldestInTwo(wbViolationVec, RegNext(VecInit(io.loadIn.map(_.bits.uop))))
    XSDebug(wbViolation, p"${Binary(Cat(wbViolationVec))}, $wbViolationUop\n")

    // check if rollback is needed for load in l1
    val l1ViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.load_s1(j).valid && // L1 valid
        isAfter(io.load_s1(j).uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.load_s1(j).paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.load_s1(j).mask).orR
    })))
    val l1Violation = l1ViolationVec.asUInt().orR()
    val l1ViolationUop = getOldestInTwo(l1ViolationVec, RegNext(VecInit(io.load_s1.map(_.uop))))
    XSDebug(l1Violation, p"${Binary(Cat(l1ViolationVec))}, $l1ViolationUop\n")

    XSDebug(
      l1Violation,
      "need rollback (l1 load) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, l1ViolationUop.roqIdx.asUInt
    )
    XSDebug(
      lqViolation,
      "need rollback (ld wb before store) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, lqViolationUop.roqIdx.asUInt
    )
    XSDebug(
      wbViolation,
      "need rollback (ld/st wb together) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, wbViolationUop.roqIdx.asUInt
    )

    ((lqViolation, lqViolationUop), (wbViolation, wbViolationUop), (l1Violation, l1ViolationUop))
  }

  def rollbackSel(a: Valid[MicroOp], b: Valid[MicroOp]): ValidIO[MicroOp] = {
    Mux(
      a.valid,
      Mux(
        b.valid,
        Mux(isAfter(a.bits.roqIdx, b.bits.roqIdx), b, a), // a,b both valid, sel oldest
        a // sel a
      ),
      b // sel b
    )
  }
  val lastCycleRedirect = RegNext(io.brqRedirect)
  val lastlastCycleRedirect = RegNext(lastCycleRedirect)
  val lastCycleFlush = RegNext(io.flush)
  val lastlastCycleFlush = RegNext(lastCycleFlush)

  // S2: select rollback (part1) and generate rollback request
  // rollback check
  // Wb/L1 rollback seq check is done in s2
  val rollbackWb = Wire(Vec(StorePipelineWidth, Valid(new MicroOp)))
  val rollbackL1 = Wire(Vec(StorePipelineWidth, Valid(new MicroOp)))
  val rollbackL1Wb = Wire(Vec(StorePipelineWidth*2, Valid(new MicroOp)))
  // Lq rollback seq check is done in s3 (next stage), as getting rollbackLq MicroOp is slow
  val rollbackLq = Wire(Vec(StorePipelineWidth, Valid(new MicroOp)))
  for (i <- 0 until StorePipelineWidth) {
    val detectedRollback = detectRollback(i)
    rollbackLq(i).valid := detectedRollback._1._1 && RegNext(io.storeIn(i).valid)
    rollbackLq(i).bits := detectedRollback._1._2
    rollbackWb(i).valid := detectedRollback._2._1 && RegNext(io.storeIn(i).valid)
    rollbackWb(i).bits := detectedRollback._2._2
    rollbackL1(i).valid := detectedRollback._3._1 && RegNext(io.storeIn(i).valid)
    rollbackL1(i).bits := detectedRollback._3._2
    rollbackL1Wb(2*i) := rollbackL1(i)
    rollbackL1Wb(2*i+1) := rollbackWb(i)
  }

  val rollbackL1WbSelected = ParallelOperation(rollbackL1Wb, rollbackSel)
  val rollbackL1WbVReg = RegNext(rollbackL1WbSelected.valid)
  val rollbackL1WbReg = RegEnable(rollbackL1WbSelected.bits, rollbackL1WbSelected.valid)
  val rollbackLq0VReg = RegNext(rollbackLq(0).valid)
  val rollbackLq0Reg = RegEnable(rollbackLq(0).bits, rollbackLq(0).valid)
  val rollbackLq1VReg = RegNext(rollbackLq(1).valid)
  val rollbackLq1Reg = RegEnable(rollbackLq(1).bits, rollbackLq(1).valid)

  // S3: select rollback (part2), generate rollback request, then fire rollback request
  // Note that we use roqIdx - 1.U to flush the load instruction itself.
  // Thus, here if last cycle's roqIdx equals this cycle's roqIdx, it still triggers the redirect.

  // FIXME: this is ugly
  val rollbackValidVec = Seq(rollbackL1WbVReg, rollbackLq0VReg, rollbackLq1VReg)
  val rollbackUopVec = Seq(rollbackL1WbReg, rollbackLq0Reg, rollbackLq1Reg)

  // select uop in parallel
  val mask = getAfterMask(rollbackValidVec, rollbackUopVec)
  val oneAfterZero = mask(1)(0)
  val rollbackUop = Mux(oneAfterZero && mask(2)(0),
    rollbackUopVec(0),
    Mux(!oneAfterZero && mask(2)(1), rollbackUopVec(1), rollbackUopVec(2)))

  // check if rollback request is still valid in parallel
  val rollbackValidVecChecked = Wire(Vec(3, Bool()))
  for(((v, uop), idx) <- rollbackValidVec.zip(rollbackUopVec).zipWithIndex) {
    rollbackValidVecChecked(idx) := v &&
      (!lastCycleRedirect.valid || isBefore(uop.roqIdx, lastCycleRedirect.bits.roqIdx)) &&
      (!lastlastCycleRedirect.valid || isBefore(uop.roqIdx, lastlastCycleRedirect.bits.roqIdx))
  }

  io.rollback.bits.roqIdx := rollbackUop.roqIdx
  io.rollback.bits.ftqIdx := rollbackUop.cf.ftqPtr
  io.rollback.bits.ftqOffset := rollbackUop.cf.ftqOffset
  io.rollback.bits.level := RedirectLevel.flush
  io.rollback.bits.interrupt := DontCare
  io.rollback.bits.cfiUpdate := DontCare
  io.rollback.bits.cfiUpdate.target := rollbackUop.cf.pc
  // io.rollback.bits.pc := DontCare

  io.rollback.valid := rollbackValidVecChecked.asUInt.orR && !lastCycleFlush && !lastlastCycleFlush

  when(io.rollback.valid) {
    // XSDebug("Mem rollback: pc %x roqidx %d\n", io.rollback.bits.cfi, io.rollback.bits.roqIdx.asUInt)
  }

  /**
    * Memory mapped IO / other uncached operations
    *
    * States:
    * (1) writeback from load units: mark as pending
    * (2) when they reach ROB's head, they can be sent to uncache channel
    * (3) response from uncache channel: mark as datavalid
    * (4) writeback to ROB (and other units): mark as writebacked
    * (5) ROB commits the instruction: same as normal instructions
    */
  //(2) when they reach ROB's head, they can be sent to uncache channel
  val lqTailMmioPending = WireInit(pending(deqPtr))
  val lqTailAllocated = WireInit(allocated(deqPtr))
  val s_idle :: s_req :: s_resp :: s_wait :: Nil = Enum(4)
  val uncacheState = RegInit(s_idle)
  switch(uncacheState) {
    is(s_idle) {
      when(io.roq.pendingld && lqTailMmioPending && lqTailAllocated) {
        uncacheState := s_req
      }
    }
    is(s_req) {
      when(io.uncache.req.fire()) {
        uncacheState := s_resp
      }
    }
    is(s_resp) {
      when(io.uncache.resp.fire()) {
        uncacheState := s_wait
      }
    }
    is(s_wait) {
      when(io.roq.commit) {
        uncacheState := s_idle // ready for next mmio
      }
    }
  }
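
  // A typical mmio load thus proceeds as follows: it marks itself pending at
  // writeback; once the ROB reports it at the head (io.roq.pendingld), the FSM
  // issues a single uncache read (s_req -> s_resp), marks the entry datavalid
  // on the response so it writes back through io.ldout like a refilled load,
  // and returns to s_idle when the ROB commits it.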
  io.uncache.req.valid := uncacheState === s_req

  dataModule.io.uncache.raddr := deqPtrExtNext.value

  io.uncache.req.bits.cmd  := MemoryOpConstants.M_XRD
  io.uncache.req.bits.addr := dataModule.io.uncache.rdata.paddr
  io.uncache.req.bits.data := dataModule.io.uncache.rdata.data
  io.uncache.req.bits.mask := dataModule.io.uncache.rdata.mask
  io.uncache.req.bits.id   := DontCare

  io.uncache.resp.ready := true.B

  when (io.uncache.req.fire()) {
    pending(deqPtr) := false.B

    XSDebug("uncache req: pc %x addr %x data %x op %x mask %x\n",
      uop(deqPtr).cf.pc,
      io.uncache.req.bits.addr,
      io.uncache.req.bits.data,
      io.uncache.req.bits.cmd,
      io.uncache.req.bits.mask
    )
  }

  // (3) response from uncache channel: mark as datavalid
  dataModule.io.uncache.wen := false.B
  when(io.uncache.resp.fire()){
    datavalid(deqPtr) := true.B
    dataModule.io.uncacheWrite(deqPtr, io.uncache.resp.bits.data(XLEN-1, 0))
    dataModule.io.uncache.wen := true.B

    XSDebug("uncache resp: data %x\n", io.uncache.resp.bits.data)
  }

  // Read vaddr for mem exception
  // no inst will be committed 1 cycle before tval update
  vaddrModule.io.raddr(0) := (deqPtrExt + commitCount).value
  io.exceptionAddr.vaddr := vaddrModule.io.rdata(0)

  // misprediction recovery / exception redirect
  // invalidate lq entries using roqIdx
  val needCancel = Wire(Vec(LoadQueueSize, Bool()))
  for (i <- 0 until LoadQueueSize) {
    needCancel(i) := uop(i).roqIdx.needFlush(io.brqRedirect, io.flush) && allocated(i)
    when (needCancel(i)) {
      allocated(i) := false.B
    }
  }

  /**
    * update pointers
    */
  val lastCycleCancelCount = PopCount(RegNext(needCancel))
  // when io.brqRedirect.valid, we don't allow enqueue even though it may fire.
  val enqNumber = Mux(io.enq.canAccept && io.enq.sqCanAccept && !(io.brqRedirect.valid || io.flush), PopCount(io.enq.req.map(_.valid)), 0.U)
  when (lastCycleRedirect.valid || lastCycleFlush) {
    // we recover the pointers in the next cycle after redirect
    enqPtrExt := VecInit(enqPtrExt.map(_ - lastCycleCancelCount))
  }.otherwise {
    enqPtrExt := VecInit(enqPtrExt.map(_ + enqNumber))
  }

  deqPtrExtNext := deqPtrExt + commitCount
  deqPtrExt := deqPtrExtNext

  val validCount = distanceBetween(enqPtrExt(0), deqPtrExt)

  allowEnqueue := validCount + enqNumber <= (LoadQueueSize - RenameWidth).U
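
  // allowEnqueue is registered: it is computed from the entry count *after*
  // this cycle's enqueue and requires RenameWidth entries of headroom, so that
  // whenever canAccept is high next cycle, a full rename group of up to
  // RenameWidth loads can be accepted without overflowing the queue.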

  // perf counter
  QueuePerf(LoadQueueSize, validCount, !allowEnqueue)
  XSPerfAccumulate("rollback", io.rollback.valid) // rollback redirect generated
  XSPerfAccumulate("mmioCycle", uncacheState =/= s_idle) // lq is busy dealing with uncache req
  XSPerfAccumulate("mmioCnt", io.uncache.req.fire())
  XSPerfAccumulate("refill", io.dcache.valid)
  XSPerfAccumulate("writeback_success", PopCount(VecInit(io.ldout.map(i => i.fire()))))
  XSPerfAccumulate("writeback_blocked", PopCount(VecInit(io.ldout.map(i => i.valid && !i.ready))))
  XSPerfAccumulate("utilization_miss", PopCount((0 until LoadQueueSize).map(i => allocated(i) && miss(i))))

  // debug info
  XSDebug("enqPtrExt %d:%d deqPtrExt %d:%d\n", enqPtrExt(0).flag, enqPtr, deqPtrExt.flag, deqPtr)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when(flag) {
      XSDebug(false, true.B, name)
    }.otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  for (i <- 0 until LoadQueueSize) {
    if (i % 4 == 0) XSDebug("")
    XSDebug(false, true.B, "%x [%x] ", uop(i).cf.pc, dataModule.io.debug(i).paddr)
    PrintFlag(allocated(i), "a")
    PrintFlag(allocated(i) && datavalid(i), "v")
    PrintFlag(allocated(i) && writebacked(i), "w")
    PrintFlag(allocated(i) && miss(i), "m")
    // PrintFlag(allocated(i) && listening(i), "l")
    PrintFlag(allocated(i) && pending(i), "p")
    XSDebug(false, true.B, " ")
    if (i % 4 == 3 || i == LoadQueueSize - 1) XSDebug(false, true.B, "\n")
  }

}
677