xref: /XiangShan/src/main/scala/xiangshan/mem/lsqueue/LoadQueue.scala (revision 5c7674fe439b80ea3bb6c6207b7d169ed6183be5)
package xiangshan.mem

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import freechips.rocketchip.tile.HasFPUParameters
import utils._
import xiangshan._
import xiangshan.cache._
import xiangshan.cache.{DCacheLineIO, DCacheWordIO, MemoryOpConstants, TlbRequestIO}
import xiangshan.mem._
import xiangshan.backend.roq.RoqLsqIO
import xiangshan.backend.fu.HasExceptionNO
import xiangshan.backend.ftq.FtqPtr


class LqPtr(implicit p: Parameters) extends CircularQueuePtr[LqPtr](
  p => p(XSCoreParamsKey).LoadQueueSize
){
  override def cloneType = (new LqPtr).asInstanceOf[this.type]
}

object LqPtr {
  def apply(f: Bool, v: UInt)(implicit p: Parameters): LqPtr = {
    val ptr = Wire(new LqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}

trait HasFpLoadHelper { this: HasFPUParameters =>
  def fpRdataHelper(uop: MicroOp, rdata: UInt): UInt = {
    LookupTree(uop.ctrl.fuOpType, List(
      LSUOpType.lw   -> recode(rdata(31, 0), S),
      LSUOpType.ld   -> recode(rdata(63, 0), D)
    ))
  }
}
trait HasLoadHelper { this: XSModule =>
  def rdataHelper(uop: MicroOp, rdata: UInt): UInt = {
    val fpWen = uop.ctrl.fpWen
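    // Note: when the destination is an FP register, the 32-bit load result is
    // NaN-boxed (upper 32 bits filled with ones) as required by the RISC-V F/D
    // extensions, and a 64-bit FP load is passed through unchanged.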
    LookupTree(uop.ctrl.fuOpType, List(
      LSUOpType.lb   -> SignExt(rdata(7, 0) , XLEN),
      LSUOpType.lh   -> SignExt(rdata(15, 0), XLEN),
      LSUOpType.lw   -> Mux(fpWen, Cat(Fill(32, 1.U(1.W)), rdata(31, 0)), SignExt(rdata(31, 0), XLEN)),
      LSUOpType.ld   -> Mux(fpWen, rdata, SignExt(rdata(63, 0), XLEN)),
      LSUOpType.lbu  -> ZeroExt(rdata(7, 0) , XLEN),
      LSUOpType.lhu  -> ZeroExt(rdata(15, 0), XLEN),
      LSUOpType.lwu  -> ZeroExt(rdata(31, 0), XLEN),
    ))
  }
}

class LqEnqIO(implicit p: Parameters) extends XSBundle {
  val canAccept = Output(Bool())
  val sqCanAccept = Input(Bool())
  val needAlloc = Vec(RenameWidth, Input(Bool()))
  val req = Vec(RenameWidth, Flipped(ValidIO(new MicroOp)))
  val resp = Vec(RenameWidth, Output(new LqPtr))
}

// Load Queue
class LoadQueue(implicit p: Parameters) extends XSModule
  with HasDCacheParameters
  with HasCircularQueuePtrHelper
  with HasLoadHelper
  with HasExceptionNO
{
  val io = IO(new Bundle() {
    val enq = new LqEnqIO
    val brqRedirect = Flipped(ValidIO(new Redirect))
    val flush = Input(Bool())
    val loadIn = Vec(LoadPipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val loadDataForwarded = Vec(LoadPipelineWidth, Input(Bool()))
    val needReplayFromRS = Vec(LoadPipelineWidth, Input(Bool()))
    val ldout = Vec(2, DecoupledIO(new ExuOutput)) // writeback int load
    val load_s1 = Vec(LoadPipelineWidth, Flipped(new PipeLoadForwardQueryIO))
    val roq = Flipped(new RoqLsqIO)
    val rollback = Output(Valid(new Redirect)) // replay now starts from load instead of store
    val dcache = Flipped(ValidIO(new Refill))
    val uncache = new DCacheWordIO
    val exceptionAddr = new ExceptionAddrIO
    val lqFull = Output(Bool())
  })

  println("LoadQueue: size:" + LoadQueueSize)

  val uop = Reg(Vec(LoadQueueSize, new MicroOp))
  // val data = Reg(Vec(LoadQueueSize, new LsRoqEntry))
  val dataModule = Module(new LoadQueueData(LoadQueueSize, wbNumRead = LoadPipelineWidth, wbNumWrite = LoadPipelineWidth))
  dataModule.io := DontCare
  val vaddrModule = Module(new SyncDataModuleTemplate(UInt(VAddrBits.W), LoadQueueSize, numRead = 1, numWrite = LoadPipelineWidth))
  vaddrModule.io := DontCare
  val allocated = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // lq entry has been allocated
  val datavalid = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // data is valid
  val writebacked = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // inst has been writebacked to CDB
  val miss = Reg(Vec(LoadQueueSize, Bool())) // load inst missed, waiting for miss queue to accept miss request
  // val listening = Reg(Vec(LoadQueueSize, Bool())) // waiting for refill result
  val pending = Reg(Vec(LoadQueueSize, Bool())) // mmio pending: inst is an mmio inst, it will not be executed until it reaches the head of roq

  val debug_mmio = Reg(Vec(LoadQueueSize, Bool())) // mmio: inst is an mmio inst
  val debug_paddr = Reg(Vec(LoadQueueSize, UInt(PAddrBits.W))) // debug: paddr of the inst

  val enqPtrExt = RegInit(VecInit((0 until RenameWidth).map(_.U.asTypeOf(new LqPtr))))
  val deqPtrExt = RegInit(0.U.asTypeOf(new LqPtr))
  val deqPtrExtNext = Wire(new LqPtr)
  val allowEnqueue = RegInit(true.B)

  val enqPtr = enqPtrExt(0).value
  val deqPtr = deqPtrExt.value

  val deqMask = UIntToMask(deqPtr, LoadQueueSize)
  val enqMask = UIntToMask(enqPtr, LoadQueueSize)
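  // deqMask/enqMask have the bits below the corresponding pointer value set
  // (UIntToMask(ptr, size) => mask of indices < ptr); they are used below to
  // reason about the circular order of queue entries.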

  val commitCount = RegNext(io.roq.lcommit)

  /**
    * Enqueue at dispatch
    *
    * Currently, LoadQueue only allows enqueue when #emptyEntries > RenameWidth(EnqWidth)
    */
  io.enq.canAccept := allowEnqueue

  for (i <- 0 until RenameWidth) {
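    // offset = number of earlier requests in this rename group that also need
    // an lq entry, so request i claims enqPtrExt(offset)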
    val offset = if (i == 0) 0.U else PopCount(io.enq.needAlloc.take(i))
    val lqIdx = enqPtrExt(offset)
    val index = lqIdx.value
    when (io.enq.req(i).valid && io.enq.canAccept && io.enq.sqCanAccept && !(io.brqRedirect.valid || io.flush)) {
      uop(index) := io.enq.req(i).bits
      allocated(index) := true.B
      datavalid(index) := false.B
      writebacked(index) := false.B
      miss(index) := false.B
      // listening(index) := false.B
      pending(index) := false.B
    }
    io.enq.resp(i) := lqIdx
  }
  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n")

  /**
    * Writeback load from load units
    *
    * Most load instructions writeback to regfile at the same time.
    * However,
    *   (1) For an mmio instruction with exceptions, it writes back to ROB immediately.
    *   (2) For an mmio instruction without exceptions, it does not write back.
    * The mmio instruction will be sent to lower level when it reaches ROB's head.
    * After uncache response, it will write back through arbiter with loadUnit.
    *   (3) For cache misses, it is marked miss and sent to dcache later.
    * After cache refills, it will write back through arbiter with loadUnit.
    */
  for (i <- 0 until LoadPipelineWidth) {
    dataModule.io.wb.wen(i) := false.B
    val loadWbIndex = io.loadIn(i).bits.uop.lqIdx.value
    when(io.loadIn(i).fire()) {
      when(io.loadIn(i).bits.miss) {
        XSInfo(io.loadIn(i).valid, "load miss write to lq idx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio
        )
      }.otherwise {
        XSInfo(io.loadIn(i).valid, "load hit write to cdb lqidx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio
        )
      }
      datavalid(loadWbIndex) := (!io.loadIn(i).bits.miss || io.loadDataForwarded(i)) &&
        !io.loadIn(i).bits.mmio && // mmio data is not valid until we finish uncache access
        !io.needReplayFromRS(i) // do not writeback if that inst will be re-sent from rs
      writebacked(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio

      val loadWbData = Wire(new LQDataEntry)
      loadWbData.paddr := io.loadIn(i).bits.paddr
      loadWbData.mask := io.loadIn(i).bits.mask
      loadWbData.data := io.loadIn(i).bits.forwardData.asUInt // fwd data
      loadWbData.fwdMask := io.loadIn(i).bits.forwardMask
      dataModule.io.wbWrite(i, loadWbIndex, loadWbData)
      dataModule.io.wb.wen(i) := true.B


      debug_mmio(loadWbIndex) := io.loadIn(i).bits.mmio
      debug_paddr(loadWbIndex) := io.loadIn(i).bits.paddr

      val dcacheMissed = io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      miss(loadWbIndex) := dcacheMissed && !io.loadDataForwarded(i) && !io.needReplayFromRS(i)
      pending(loadWbIndex) := io.loadIn(i).bits.mmio
      uop(loadWbIndex).debugInfo.issueTime := io.loadIn(i).bits.uop.debugInfo.issueTime
    }
    // vaddrModule write is delayed, as vaddrModule will not be read right after write
    vaddrModule.io.waddr(i) := RegNext(loadWbIndex)
    vaddrModule.io.wdata(i) := RegNext(io.loadIn(i).bits.vaddr)
    vaddrModule.io.wen(i) := RegNext(io.loadIn(i).fire())
  }

  when(io.dcache.valid) {
    XSDebug("miss resp: paddr:0x%x data %x\n", io.dcache.bits.addr, io.dcache.bits.data)
  }

  // Refill 64 bit in a cycle
  // Refill data comes back from io.dcache.resp
  dataModule.io.refill.valid := io.dcache.valid
  dataModule.io.refill.paddr := io.dcache.bits.addr
  dataModule.io.refill.data := io.dcache.bits.data

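  // For each entry still waiting on a dcache miss (refillMask), mark its data
  // valid once the refill matches; matchMask is the per-entry paddr comparison
  // done inside dataModule against the refill address.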
  (0 until LoadQueueSize).map(i => {
    dataModule.io.refill.refillMask(i) := allocated(i) && miss(i)
    when(dataModule.io.refill.valid && dataModule.io.refill.refillMask(i) && dataModule.io.refill.matchMask(i)) {
      datavalid(i) := true.B
      miss(i) := false.B
    }
  })

  // Writeback up to 2 missed load insts to CDB
  //
  // Pick 2 missed loads (data refilled) and write them back to the cdb
  // The 2 refilled loads are selected from even/odd entries separately

  // Stage 0
  // Generate writeback indexes

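  // The two writeback ports are banked by entry parity: port 0 serves
  // even-indexed entries and port 1 serves odd-indexed entries.
  // getEvenBits/getOddBits split a LoadQueueSize-wide bit vector into the two
  // corresponding halves.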
  def getEvenBits(input: UInt): UInt = {
    require(input.getWidth == LoadQueueSize)
    VecInit((0 until LoadQueueSize/2).map(i => {input(2*i)})).asUInt
  }
  def getOddBits(input: UInt): UInt = {
    require(input.getWidth == LoadQueueSize)
    VecInit((0 until LoadQueueSize/2).map(i => {input(2*i+1)})).asUInt
  }

  val loadWbSel = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W))) // index selected last cycle
  val loadWbSelV = Wire(Vec(LoadPipelineWidth, Bool())) // index selected in last cycle is valid

  val loadWbSelVec = VecInit((0 until LoadQueueSize).map(i => {
    allocated(i) && !writebacked(i) && datavalid(i)
  })).asUInt() // use uint instead of vec to reduce verilog lines
  val evenDeqMask = getEvenBits(deqMask)
  val oddDeqMask = getOddBits(deqMask)
  // generate lastCycleSelect mask
  val evenSelectMask = Mux(io.ldout(0).fire(), getEvenBits(UIntToOH(loadWbSel(0))), 0.U)
  val oddSelectMask = Mux(io.ldout(1).fire(), getOddBits(UIntToOH(loadWbSel(1))), 0.U)
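  // The entry whose writeback fires this cycle is masked out of the new
  // selection: its writebacked flag is only set at the next clock edge, so
  // without this mask the same entry could be selected twice.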
  // generate real select vec
  val loadEvenSelVec = getEvenBits(loadWbSelVec) & ~evenSelectMask
  val loadOddSelVec = getOddBits(loadWbSelVec) & ~oddSelectMask

  def toVec(a: UInt): Vec[Bool] = {
    VecInit(a.asBools)
  }

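  // Within each half, pick the first ready entry in age order (starting from
  // deqPtr), then rebuild the full queue index by appending the parity bit:
  // even half -> index*2, odd half -> index*2+1.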
  val loadWbSelGen = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W)))
  val loadWbSelVGen = Wire(Vec(LoadPipelineWidth, Bool()))
  loadWbSelGen(0) := Cat(getFirstOne(toVec(loadEvenSelVec), evenDeqMask), 0.U(1.W))
  loadWbSelVGen(0) := loadEvenSelVec.asUInt.orR
  loadWbSelGen(1) := Cat(getFirstOne(toVec(loadOddSelVec), oddDeqMask), 1.U(1.W))
  loadWbSelVGen(1) := loadOddSelVec.asUInt.orR

  (0 until LoadPipelineWidth).map(i => {
    loadWbSel(i) := RegNext(loadWbSelGen(i))
    loadWbSelV(i) := RegNext(loadWbSelVGen(i), init = false.B)
    when(io.ldout(i).fire()){
      // Mark them as writebacked, so they will not be selected in the next cycle
      writebacked(loadWbSel(i)) := true.B
    }
  })

  // Stage 1
  // Use indexes generated in cycle 0 to read data
  // writeback data to cdb
  (0 until LoadPipelineWidth).map(i => {
    // data select
    dataModule.io.wb.raddr(i) := loadWbSelGen(i)
    val rdata = dataModule.io.wb.rdata(i).data
    val seluop = uop(loadWbSel(i))
    val func = seluop.ctrl.fuOpType
    val raddr = dataModule.io.wb.rdata(i).paddr
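    // raddr(2, 0) is the byte offset inside the 64-bit entry: shift the data
    // right by that many bytes so rdataHelper can sign/zero-extend from bit 0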
    val rdataSel = LookupTree(raddr(2, 0), List(
      "b000".U -> rdata(63, 0),
      "b001".U -> rdata(63, 8),
      "b010".U -> rdata(63, 16),
      "b011".U -> rdata(63, 24),
      "b100".U -> rdata(63, 32),
      "b101".U -> rdata(63, 40),
      "b110".U -> rdata(63, 48),
      "b111".U -> rdata(63, 56)
    ))
    val rdataPartialLoad = rdataHelper(seluop, rdataSel)

    // writeback missed int/fp load
    //
    // Int load writeback will finish (if not blocked) in one cycle
    io.ldout(i).bits.uop := seluop
    io.ldout(i).bits.uop.lqIdx := loadWbSel(i).asTypeOf(new LqPtr)
    io.ldout(i).bits.data := rdataPartialLoad
    io.ldout(i).bits.redirectValid := false.B
    io.ldout(i).bits.redirect := DontCare
    io.ldout(i).bits.debug.isMMIO := debug_mmio(loadWbSel(i))
    io.ldout(i).bits.debug.isPerfCnt := false.B
    io.ldout(i).bits.debug.paddr := debug_paddr(loadWbSel(i))
    io.ldout(i).bits.fflags := DontCare
    io.ldout(i).valid := loadWbSelV(i)

    when(io.ldout(i).fire()) {
      XSInfo("int load miss write to cdb roqidx %d lqidx %d pc 0x%x mmio %x\n",
        io.ldout(i).bits.uop.roqIdx.asUInt,
        io.ldout(i).bits.uop.lqIdx.asUInt,
        io.ldout(i).bits.uop.cf.pc,
        debug_mmio(loadWbSel(i))
      )
    }

  })

  /**
    * Load commits
    *
    * When a load is committed, mark it as !allocated and move deqPtrExt forward.
    */
  (0 until CommitWidth).map(i => {
    when(commitCount > i.U){
      allocated(deqPtr+i.U) := false.B
    }
  })

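  // getFirstOne: circular priority encoder. Bits not covered by startMask
  // (i.e. at or above the start pointer) take priority; if none of them is set,
  // fall back to the wrapped-around part. The result is the first set bit found
  // when scanning upward (circularly) from the start pointer.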
  def getFirstOne(mask: Vec[Bool], startMask: UInt) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    PriorityEncoder(Mux(highBitsUint.orR(), highBitsUint, mask.asUInt))
  }

  def getOldestInTwo(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    assert(valid.length == 2)
    Mux(valid(0) && valid(1),
      Mux(isAfter(uop(0).roqIdx, uop(1).roqIdx), uop(1), uop(0)),
      Mux(valid(0) && !valid(1), uop(0), uop(1)))
  }

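  // getAfterMask: mask(i)(j) is true when candidate i is program-order younger
  // than candidate j (or when i is invalid), which lets the caller pick the
  // oldest valid candidate with simple mux logic.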
  def getAfterMask(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    val length = valid.length
    (0 until length).map(i => {
      (0 until length).map(j => {
        Mux(valid(i) && valid(j),
          isAfter(uop(i).roqIdx, uop(j).roqIdx),
          Mux(!valid(i), true.B, false.B))
      })
    })
  }

  /**
    * Memory violation detection
    *
    * When store writes back, it searches LoadQueue for younger load instructions
    * with the same load physical address. They loaded wrong data and need re-execution.
    *
    * Cycle 0: Store Writeback
    *   Generate match vector for store address with rangeMask(stPtr, enqPtr).
    *   Besides, load instructions in LoadUnit_S1 and S2 are also checked.
    * Cycle 1: Redirect Generation
    *   There're three possible types of violations, up to 6 possible redirect requests.
    *   Choose the oldest load (part 1). (4 + 2) -> (1 + 2)
    * Cycle 2: Redirect Fire
    *   Choose the oldest load (part 2). (3 -> 1)
    *   Prepare redirect request according to the detected violation.
    *   Fire redirect request (if valid)
    */

  // stage 0:        lq l1 wb     l1 wb lq
  //                 |  |  |      |  |  |  (paddr match)
  // stage 1:        lq l1 wb     l1 wb lq
  //                 |  |  |      |  |  |
  //                 |  |------------|  |
  //                 |        |         |
  // stage 2:        lq      l1wb       lq
  //                 |        |         |
  //                 --------------------
  //                          |
  //                      rollback req
  io.load_s1 := DontCare
  def detectRollback(i: Int) = {
    val startIndex = io.storeIn(i).bits.uop.lqIdx.value
    val lqIdxMask = UIntToMask(startIndex, LoadQueueSize)
    val xorMask = lqIdxMask ^ enqMask
    val sameFlag = io.storeIn(i).bits.uop.lqIdx.flag === enqPtrExt(0).flag
    val toEnqPtrMask = Mux(sameFlag, xorMask, ~xorMask)
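    // toEnqPtrMask marks the entries between the store's lqIdx and enqPtr,
    // i.e. loads allocated after (younger than) the store; xor-ing the two
    // "bits below pointer" masks plus the flag comparison handles wrap-around.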

    // check if a load already in the lq needs to be rolled back
    dataModule.io.violation(i).paddr := io.storeIn(i).bits.paddr
    dataModule.io.violation(i).mask := io.storeIn(i).bits.mask
    val addrMaskMatch = RegNext(dataModule.io.violation(i).violationMask)
    val entryNeedCheck = RegNext(VecInit((0 until LoadQueueSize).map(j => {
      allocated(j) && toEnqPtrMask(j) && (datavalid(j) || miss(j))
    })))
    val lqViolationVec = VecInit((0 until LoadQueueSize).map(j => {
      addrMaskMatch(j) && entryNeedCheck(j)
    }))
    val lqViolation = lqViolationVec.asUInt().orR()
    val lqViolationIndex = getFirstOne(lqViolationVec, RegNext(lqIdxMask))
    val lqViolationUop = uop(lqViolationIndex)
    // lqViolationUop.lqIdx.flag := deqMask(lqViolationIndex) ^ deqPtrExt.flag
    // lqViolationUop.lqIdx.value := lqViolationIndex
    XSDebug(lqViolation, p"${Binary(Cat(lqViolationVec))}, $startIndex, $lqViolationIndex\n")

    // when l/s writeback to roq together, check if rollback is needed
    val wbViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.loadIn(j).valid &&
        isAfter(io.loadIn(j).bits.uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.loadIn(j).bits.paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.loadIn(j).bits.mask).orR
    })))
    val wbViolation = wbViolationVec.asUInt().orR()
    val wbViolationUop = getOldestInTwo(wbViolationVec, RegNext(VecInit(io.loadIn.map(_.bits.uop))))
    XSDebug(wbViolation, p"${Binary(Cat(wbViolationVec))}, $wbViolationUop\n")

    // check if rollback is needed for load in l1
    val l1ViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.load_s1(j).valid && // L1 valid
        isAfter(io.load_s1(j).uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.load_s1(j).paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.load_s1(j).mask).orR
    })))
    val l1Violation = l1ViolationVec.asUInt().orR()
    val l1ViolationUop = getOldestInTwo(l1ViolationVec, RegNext(VecInit(io.load_s1.map(_.uop))))
    XSDebug(l1Violation, p"${Binary(Cat(l1ViolationVec))}, $l1ViolationUop\n")

    XSDebug(
      l1Violation,
      "need rollback (l1 load) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, l1ViolationUop.roqIdx.asUInt
    )
    XSDebug(
      lqViolation,
      "need rollback (ld wb before store) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, lqViolationUop.roqIdx.asUInt
    )
    XSDebug(
      wbViolation,
      "need rollback (ld/st wb together) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, wbViolationUop.roqIdx.asUInt
    )

    ((lqViolation, lqViolationUop), (wbViolation, wbViolationUop), (l1Violation, l1ViolationUop))
  }

  def rollbackSel(a: Valid[MicroOpRbExt], b: Valid[MicroOpRbExt]): ValidIO[MicroOpRbExt] = {
    Mux(
      a.valid,
      Mux(
        b.valid,
        Mux(isAfter(a.bits.uop.roqIdx, b.bits.uop.roqIdx), b, a), // a,b both valid, sel oldest
        a // sel a
      ),
      b // sel b
    )
  }
  val lastCycleRedirect = RegNext(io.brqRedirect)
  val lastlastCycleRedirect = RegNext(lastCycleRedirect)
  val lastCycleFlush = RegNext(io.flush)
  val lastlastCycleFlush = RegNext(lastCycleFlush)

  // S2: select rollback (part1) and generate rollback request
  // rollback check
  // Wb/L1 rollback seq check is done in s2
  val rollbackWb = Wire(Vec(StorePipelineWidth, Valid(new MicroOpRbExt)))
  val rollbackL1 = Wire(Vec(StorePipelineWidth, Valid(new MicroOpRbExt)))
  val rollbackL1Wb = Wire(Vec(StorePipelineWidth*2, Valid(new MicroOpRbExt)))
  // Lq rollback seq check is done in s3 (next stage), as getting rollbackLq MicroOp is slow
  val rollbackLq = Wire(Vec(StorePipelineWidth, Valid(new MicroOpRbExt)))
  // store ftq index for store set update
  val stFtqIdxS2 = Wire(Vec(StorePipelineWidth, new FtqPtr))
  val stFtqOffsetS2 = Wire(Vec(StorePipelineWidth, UInt(log2Up(PredictWidth).W)))
  for (i <- 0 until StorePipelineWidth) {
    val detectedRollback = detectRollback(i)
    rollbackLq(i).valid := detectedRollback._1._1 && RegNext(io.storeIn(i).valid)
    rollbackLq(i).bits.uop := detectedRollback._1._2
    rollbackLq(i).bits.flag := i.U
    rollbackWb(i).valid := detectedRollback._2._1 && RegNext(io.storeIn(i).valid)
    rollbackWb(i).bits.uop := detectedRollback._2._2
    rollbackWb(i).bits.flag := i.U
    rollbackL1(i).valid := detectedRollback._3._1 && RegNext(io.storeIn(i).valid)
    rollbackL1(i).bits.uop := detectedRollback._3._2
    rollbackL1(i).bits.flag := i.U
    rollbackL1Wb(2*i) := rollbackL1(i)
    rollbackL1Wb(2*i+1) := rollbackWb(i)
    stFtqIdxS2(i) := RegNext(io.storeIn(i).bits.uop.cf.ftqPtr)
    stFtqOffsetS2(i) := RegNext(io.storeIn(i).bits.uop.cf.ftqOffset)
  }

  val rollbackL1WbSelected = ParallelOperation(rollbackL1Wb, rollbackSel)
  val rollbackL1WbVReg = RegNext(rollbackL1WbSelected.valid)
  val rollbackL1WbReg = RegEnable(rollbackL1WbSelected.bits, rollbackL1WbSelected.valid)
  val rollbackLq0VReg = RegNext(rollbackLq(0).valid)
  val rollbackLq0Reg = RegEnable(rollbackLq(0).bits, rollbackLq(0).valid)
  val rollbackLq1VReg = RegNext(rollbackLq(1).valid)
  val rollbackLq1Reg = RegEnable(rollbackLq(1).bits, rollbackLq(1).valid)

  // S3: select rollback (part2), generate rollback request, then fire rollback request
  // Note that we use roqIdx - 1.U to flush the load instruction itself.
  // Thus, here if last cycle's roqIdx equals this cycle's roqIdx, it still triggers the redirect.

  // FIXME: this is ugly
  val rollbackValidVec = Seq(rollbackL1WbVReg, rollbackLq0VReg, rollbackLq1VReg)
  val rollbackUopExtVec = Seq(rollbackL1WbReg, rollbackLq0Reg, rollbackLq1Reg)

  // select uop in parallel
  val mask = getAfterMask(rollbackValidVec, rollbackUopExtVec.map(i => i.uop))
  val oneAfterZero = mask(1)(0)
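  // Pick the oldest of the three candidates: take 0 if both 1 and 2 are after
  // it (or invalid); otherwise take 1 if 2 is after it; otherwise take 2.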
  val rollbackUopExt = Mux(oneAfterZero && mask(2)(0),
    rollbackUopExtVec(0),
    Mux(!oneAfterZero && mask(2)(1), rollbackUopExtVec(1), rollbackUopExtVec(2)))
  val stFtqIdxS3 = RegNext(stFtqIdxS2)
  val stFtqOffsetS3 = RegNext(stFtqOffsetS2)
  val rollbackUop = rollbackUopExt.uop
  val rollbackStFtqIdx = stFtqIdxS3(rollbackUopExt.flag)
  val rollbackStFtqOffset = stFtqOffsetS3(rollbackUopExt.flag)

  // check if rollback request is still valid in parallel
  val rollbackValidVecChecked = Wire(Vec(3, Bool()))
  for(((v, uop), idx) <- rollbackValidVec.zip(rollbackUopExtVec.map(i => i.uop)).zipWithIndex) {
    rollbackValidVecChecked(idx) := v &&
      (!lastCycleRedirect.valid || isBefore(uop.roqIdx, lastCycleRedirect.bits.roqIdx)) &&
      (!lastlastCycleRedirect.valid || isBefore(uop.roqIdx, lastlastCycleRedirect.bits.roqIdx))
  }

  io.rollback.bits.roqIdx := rollbackUop.roqIdx
  io.rollback.bits.ftqIdx := rollbackUop.cf.ftqPtr
  io.rollback.bits.stFtqIdx := rollbackStFtqIdx
  io.rollback.bits.ftqOffset := rollbackUop.cf.ftqOffset
  io.rollback.bits.stFtqOffset := rollbackStFtqOffset
  io.rollback.bits.level := RedirectLevel.flush
  io.rollback.bits.interrupt := DontCare
  io.rollback.bits.cfiUpdate := DontCare
  io.rollback.bits.cfiUpdate.target := rollbackUop.cf.pc
  // io.rollback.bits.pc := DontCare

  io.rollback.valid := rollbackValidVecChecked.asUInt.orR && !lastCycleFlush && !lastlastCycleFlush

  when(io.rollback.valid) {
    // XSDebug("Mem rollback: pc %x roqidx %d\n", io.rollback.bits.cfi, io.rollback.bits.roqIdx.asUInt)
  }

  /**
    * Memory mapped IO / other uncached operations
    *
    * States:
    * (1) writeback from store units: mark as pending
    * (2) when they reach ROB's head, they can be sent to uncache channel
    * (3) response from uncache channel: mark as datavalid
    * (4) writeback to ROB (and other units): mark as writebacked
    * (5) ROB commits the instruction: same as normal instructions
    */
  // (2) when they reach ROB's head, they can be sent to uncache channel
  val lqTailMmioPending = WireInit(pending(deqPtr))
  val lqTailAllocated = WireInit(allocated(deqPtr))
  val s_idle :: s_req :: s_resp :: s_wait :: Nil = Enum(4)
  val uncacheState = RegInit(s_idle)
  switch(uncacheState) {
    is(s_idle) {
      when(io.roq.pendingld && lqTailMmioPending && lqTailAllocated) {
        uncacheState := s_req
      }
    }
    is(s_req) {
      when(io.uncache.req.fire()) {
        uncacheState := s_resp
      }
    }
    is(s_resp) {
      when(io.uncache.resp.fire()) {
        uncacheState := s_wait
      }
    }
    is(s_wait) {
      when(io.roq.commit) {
        uncacheState := s_idle // ready for next mmio
      }
    }
  }
  io.uncache.req.valid := uncacheState === s_req

  dataModule.io.uncache.raddr := deqPtrExtNext.value

  io.uncache.req.bits.cmd  := MemoryOpConstants.M_XRD
  io.uncache.req.bits.addr := dataModule.io.uncache.rdata.paddr
  io.uncache.req.bits.data := dataModule.io.uncache.rdata.data
  io.uncache.req.bits.mask := dataModule.io.uncache.rdata.mask

  io.uncache.req.bits.id   := DontCare

  io.uncache.resp.ready := true.B

  when (io.uncache.req.fire()) {
    pending(deqPtr) := false.B

    XSDebug("uncache req: pc %x addr %x data %x op %x mask %x\n",
      uop(deqPtr).cf.pc,
      io.uncache.req.bits.addr,
      io.uncache.req.bits.data,
      io.uncache.req.bits.cmd,
      io.uncache.req.bits.mask
    )
  }

  // (3) response from uncache channel: mark as datavalid
  dataModule.io.uncache.wen := false.B
  when(io.uncache.resp.fire()){
    datavalid(deqPtr) := true.B
    dataModule.io.uncacheWrite(deqPtr, io.uncache.resp.bits.data(XLEN-1, 0))
    dataModule.io.uncache.wen := true.B

    XSDebug("uncache resp: data %x\n", io.dcache.bits.data)
  }

  // Read vaddr for mem exception
  // no inst will be committed 1 cycle before tval update
  vaddrModule.io.raddr(0) := (deqPtrExt + commitCount).value
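  // vaddrModule has a one-cycle synchronous read latency, so it is read at
  // (deqPtrExt + commitCount): the data returned next cycle corresponds to the
  // updated deqPtr.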
  io.exceptionAddr.vaddr := vaddrModule.io.rdata(0)

  // misprediction recovery / exception redirect
  // invalidate lq entries using roqIdx
  val needCancel = Wire(Vec(LoadQueueSize, Bool()))
  for (i <- 0 until LoadQueueSize) {
    needCancel(i) := uop(i).roqIdx.needFlush(io.brqRedirect, io.flush) && allocated(i)
    when (needCancel(i)) {
      allocated(i) := false.B
    }
  }

  /**
    * update pointers
    */
  val lastCycleCancelCount = PopCount(RegNext(needCancel))
  // when io.brqRedirect.valid, we don't allow enqueue even though it may fire.
  val enqNumber = Mux(io.enq.canAccept && io.enq.sqCanAccept && !(io.brqRedirect.valid || io.flush), PopCount(io.enq.req.map(_.valid)), 0.U)
  when (lastCycleRedirect.valid || lastCycleFlush) {
    // we recover the pointers in the next cycle after redirect
    enqPtrExt := VecInit(enqPtrExt.map(_ - lastCycleCancelCount))
  }.otherwise {
    enqPtrExt := VecInit(enqPtrExt.map(_ + enqNumber))
  }

  deqPtrExtNext := deqPtrExt + commitCount
  deqPtrExt := deqPtrExtNext

  val validCount = distanceBetween(enqPtrExt(0), deqPtrExt)

  allowEnqueue := validCount + enqNumber <= (LoadQueueSize - RenameWidth).U
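  // allowEnqueue (used next cycle) keeps RenameWidth entries in reserve so a
  // full dispatch group can always be accepted; enqNumber is added because
  // validCount is computed from pointers that have not yet absorbed this
  // cycle's enqueues. Loads committing this cycle are ignored, which only
  // makes the check conservative.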

  /**
    * misc
    */
  io.roq.storeDataRoqWb := DontCare // will be overwritten by store queue's result

  // perf counter
  QueuePerf(LoadQueueSize, validCount, !allowEnqueue)
  io.lqFull := !allowEnqueue
  XSPerfAccumulate("rollback", io.rollback.valid) // rollback redirect generated
  XSPerfAccumulate("mmioCycle", uncacheState =/= s_idle) // lq is busy dealing with uncache req
  XSPerfAccumulate("mmioCnt", io.uncache.req.fire())
  XSPerfAccumulate("refill", io.dcache.valid)
  XSPerfAccumulate("writeback_success", PopCount(VecInit(io.ldout.map(i => i.fire()))))
  XSPerfAccumulate("writeback_blocked", PopCount(VecInit(io.ldout.map(i => i.valid && !i.ready))))
  XSPerfAccumulate("utilization_miss", PopCount((0 until LoadQueueSize).map(i => allocated(i) && miss(i))))

  // debug info
  XSDebug("enqPtrExt %d:%d deqPtrExt %d:%d\n", enqPtrExt(0).flag, enqPtr, deqPtrExt.flag, deqPtr)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when(flag) {
      XSDebug(false, true.B, name)
    }.otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  for (i <- 0 until LoadQueueSize) {
    if (i % 4 == 0) XSDebug("")
    XSDebug(false, true.B, "%x [%x] ", uop(i).cf.pc, dataModule.io.debug(i).paddr)
    PrintFlag(allocated(i), "a")
    PrintFlag(allocated(i) && datavalid(i), "v")
    PrintFlag(allocated(i) && writebacked(i), "w")
    PrintFlag(allocated(i) && miss(i), "m")
    // PrintFlag(allocated(i) && listening(i), "l")
    PrintFlag(allocated(i) && pending(i), "p")
    XSDebug(false, true.B, " ")
    if (i % 4 == 3 || i == LoadQueueSize - 1) XSDebug(false, true.B, "\n")
  }

}
