xref: /XiangShan/src/main/scala/xiangshan/mem/lsqueue/StoreQueue.scala (revision 3fae98ac4fa4aeb3c8ed035932edb8a13ce11e3c)
package xiangshan.mem

import chisel3._
import chisel3.util._
import utils._
import xiangshan._
import xiangshan.cache._
import xiangshan.cache.{DCacheWordIO, DCacheLineIO, TlbRequestIO, MemoryOpConstants}
import xiangshan.backend.LSUOpType
import xiangshan.backend.roq.RoqPtr


class SqPtr extends CircularQueuePtr(SqPtr.StoreQueueSize) { }

object SqPtr extends HasXSParameter {
  def apply(f: Bool, v: UInt): SqPtr = {
    val ptr = Wire(new SqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}
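// How the extended (flag, value) pointer encoding distinguishes full from empty
// (the CircularQueuePtr convention: the flag flips each time the value wraps):
//   head = (flag 0, value 5), tail = (flag 0, value 5) -> queue empty
//   head = (flag 1, value 5), tail = (flag 0, value 5) -> queue full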

// Store Queue
class StoreQueue extends XSModule with HasDCacheParameters with HasCircularQueuePtrHelper {
  val io = IO(new Bundle() {
    val dp1Req = Vec(RenameWidth, Flipped(DecoupledIO(new MicroOp)))
    val lqReady = Input(Vec(RenameWidth, Bool()))
    val sqReady = Output(Vec(RenameWidth, Bool()))
    val sqIdxs = Output(Vec(RenameWidth, new SqPtr))
    val brqRedirect = Input(Valid(new Redirect))
    val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val sbuffer = Vec(StorePipelineWidth, Decoupled(new DCacheWordReq))
    val stout = Vec(2, DecoupledIO(new ExuOutput)) // writeback store
    val forward = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO))
    val commits = Flipped(Vec(CommitWidth, Valid(new RoqCommit)))
    val uncache = new DCacheWordIO
    val roqDeqPtr = Input(new RoqPtr)
    // val refill = Flipped(Valid(new DCacheLineReq ))
    val oldestStore = Output(Valid(new RoqPtr))
    val exceptionAddr = new ExceptionAddrIO
  })

  val uop = Reg(Vec(StoreQueueSize, new MicroOp))
  // val data = Reg(Vec(StoreQueueSize, new LsqEntry))
  val dataModule = Module(new LSQueueData(StoreQueueSize, StorePipelineWidth))
  dataModule.io := DontCare
  val allocated = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // sq entry has been allocated
  val datavalid = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // non-mmio data is valid
  val writebacked = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // inst has been written back to CDB
  val commited = Reg(Vec(StoreQueueSize, Bool())) // inst has been committed by the ROQ
  val pending = Reg(Vec(StoreQueueSize, Bool())) // mmio pending: inst is an mmio inst and will not be executed until it reaches the head of the ROQ

  val ringBufferHeadExtended = RegInit(0.U.asTypeOf(new SqPtr))
  val ringBufferTailExtended = RegInit(0.U.asTypeOf(new SqPtr))
  val ringBufferHead = ringBufferHeadExtended.value
  val ringBufferTail = ringBufferTailExtended.value
  val ringBufferSameFlag = ringBufferHeadExtended.flag === ringBufferTailExtended.flag
  val ringBufferEmpty = ringBufferHead === ringBufferTail && ringBufferSameFlag
  val ringBufferFull = ringBufferHead === ringBufferTail && !ringBufferSameFlag
  val ringBufferAllowin = !ringBufferFull

  val storeCommit = (0 until CommitWidth).map(i => io.commits(i).valid && !io.commits(i).bits.isWalk && io.commits(i).bits.uop.ctrl.commitType === CommitType.STORE)
  val mcommitIdx = (0 until CommitWidth).map(i => io.commits(i).bits.uop.sqIdx.value)

  val tailMask = (((1.U((StoreQueueSize + 1).W)) << ringBufferTail).asUInt - 1.U)(StoreQueueSize - 1, 0)
  val headMask = (((1.U((StoreQueueSize + 1).W)) << ringBufferHead).asUInt - 1.U)(StoreQueueSize - 1, 0)
  val enqDeqMask1 = tailMask ^ headMask
  val enqDeqMask = Mux(ringBufferSameFlag, enqDeqMask1, ~enqDeqMask1)
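  // A worked example of the masks above, assuming StoreQueueSize = 8
  // (tailMask / headMask mark the entries strictly below the tail / head):
  //   same flag, tail = 2, head = 5: enqDeqMask = tailMask ^ headMask
  //     = 0b00000011 ^ 0b00011111 = 0b00011100, i.e. entries 2..4 are in use
  //   flags differ, tail = 5, head = 2 (head has wrapped): enqDeqMask = ~enqDeqMask1
  //     = ~(0b00011111 ^ 0b00000011) = 0b11100011, i.e. entries 5..7 and 0..1 are in use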

  // Enqueue at dispatch
  val emptyEntries = StoreQueueSize.U - distanceBetween(ringBufferHeadExtended, ringBufferTailExtended)
  XSDebug("(ready, valid): ")
  for (i <- 0 until RenameWidth) {
    val offset = if (i == 0) 0.U else PopCount((0 until i).map(io.dp1Req(_).valid))
    val sqIdx = ringBufferHeadExtended + offset
    val index = sqIdx.value
    when(io.dp1Req(i).fire()) {
      uop(index) := io.dp1Req(i).bits
      allocated(index) := true.B
      datavalid(index) := false.B
      writebacked(index) := false.B
      commited(index) := false.B
      pending(index) := false.B
    }
    val numTryEnqueue = offset +& io.dp1Req(i).valid
    io.sqReady(i) := numTryEnqueue <= emptyEntries
    io.dp1Req(i).ready := io.lqReady(i) && io.sqReady(i)
    io.sqIdxs(i) := sqIdx
    XSDebug(false, true.B, "(%d, %d) ", io.dp1Req(i).ready, io.dp1Req(i).valid)
  }
  XSDebug(false, true.B, "\n")

  // one bit per dispatch slot (dp1Req is RenameWidth wide)
  val firedDispatch = VecInit((0 until RenameWidth).map(io.dp1Req(_).fire())).asUInt
  when(firedDispatch.orR) {
    ringBufferHeadExtended := ringBufferHeadExtended + PopCount(firedDispatch)
    XSInfo("dispatched %d insts to sq\n", PopCount(firedDispatch))
  }
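  // E.g. with RenameWidth = 4 and dp1Req.valid = (1, 0, 1, 1), the three valid
  // requests are allocated at offsets 0, 1 and 2 from the head (invalid slots do
  // not consume entries), and the head pointer advances by PopCount = 3 if all fire.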

  // writeback store
  (0 until StorePipelineWidth).map(i => {
    dataModule.io.wb(i).wen := false.B
    when(io.storeIn(i).fire()) {
      val stWbIndex = io.storeIn(i).bits.uop.sqIdx.value
      val hasException = io.storeIn(i).bits.uop.cf.exceptionVec.asUInt.orR
      // an mmio store without exception stays pending; everything else becomes data-valid
      datavalid(stWbIndex) := !io.storeIn(i).bits.mmio || hasException
      pending(stWbIndex) := io.storeIn(i).bits.mmio && !hasException

      val storeWbData = Wire(new LsqEntry)
      storeWbData := DontCare
      storeWbData.paddr := io.storeIn(i).bits.paddr
      storeWbData.vaddr := io.storeIn(i).bits.vaddr
      storeWbData.mask := io.storeIn(i).bits.mask
      storeWbData.data := io.storeIn(i).bits.data
      storeWbData.mmio := io.storeIn(i).bits.mmio
      storeWbData.exception := io.storeIn(i).bits.uop.cf.exceptionVec.asUInt

      dataModule.io.wbWrite(i, stWbIndex, storeWbData)
      dataModule.io.wb(i).wen := true.B

      XSInfo("store write to sq idx %d pc 0x%x vaddr %x paddr %x data %x mmio %x roll %x exc %x\n",
        io.storeIn(i).bits.uop.sqIdx.value,
        io.storeIn(i).bits.uop.cf.pc,
        io.storeIn(i).bits.vaddr,
        io.storeIn(i).bits.paddr,
        io.storeIn(i).bits.data,
        io.storeIn(i).bits.mmio,
        io.storeIn(i).bits.rollback,
        io.storeIn(i).bits.uop.cf.exceptionVec.asUInt
        )
    }
  })

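  // getFirstOne returns the index of the first set bit in `mask` at or above the
  // start position encoded by `startMask` (a thermometer mask of the entries below
  // the start), wrapping to the lowest set bit when nothing is set above the start.
  // E.g. with mask = 0b01000011 and startMask = 0b00000011 (start = 2) it returns 6;
  // with mask = 0b00000011 it wraps and returns 0.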
  def getFirstOne(mask: Vec[Bool], startMask: UInt) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    PriorityEncoder(Mux(highBitsUint.orR(), highBitsUint, mask.asUInt))
  }

  def getFirstOneWithFlag(mask: Vec[Bool], startMask: UInt, startFlag: Bool) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    // when nothing is set above the start, the search wraps, so the result's flag flips
    val changeDirection = !highBitsUint.orR()
    val index = PriorityEncoder(Mux(!changeDirection, highBitsUint, mask.asUInt))
    SqPtr(startFlag ^ changeDirection, index)
  }

  def selectFirstTwo(valid: Vec[Bool], startMask: UInt) = {
    val selVec = Wire(Vec(2, UInt(log2Up(StoreQueueSize).W)))
    val selValid = Wire(Vec(2, Bool()))
    selVec(0) := getFirstOne(valid, startMask)
    val firstSelMask = UIntToOH(selVec(0))
    val secondSelVec = VecInit((0 until valid.length).map(i => valid(i) && !firstSelMask(i)))
    selVec(1) := getFirstOne(secondSelVec, startMask)
    selValid(0) := Cat(valid).orR
    selValid(1) := Cat(secondSelVec).orR
    (selValid, selVec)
  }
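  // E.g. valid = 0b00100010 (entries 1 and 5) with the tail at 4: the first pick is
  // entry 5 (the first valid entry at or above the tail); entry 5 is then masked off
  // and the second pick wraps around to entry 1.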

  def selectFirstTwoRoughly(valid: Vec[Bool]) = {
    // TODO: do not select by index order; just pick two valid bits randomly
    val firstSelVec = valid
    // notFirstVec(i): some valid bit exists at a lower index, so entry i cannot be the first pick
    val notFirstVec = Wire(Vec(valid.length, Bool()))
    (0 until valid.length).map(i =>
      notFirstVec(i) := (if (i != 0) { valid(i - 1) || notFirstVec(i - 1) } else { false.B })
    )
    // the second pick excludes the first valid entry
    val secondSelVec = VecInit((0 until valid.length).map(i => valid(i) && notFirstVec(i)))

    val selVec = Wire(Vec(2, UInt(log2Up(valid.length).W)))
    val selValid = Wire(Vec(2, Bool()))
    selVec(0) := PriorityEncoder(firstSelVec)
    selVec(1) := PriorityEncoder(secondSelVec)
    selValid(0) := Cat(firstSelVec).orR
    selValid(1) := Cat(secondSelVec).orR
    (selValid, selVec)
  }

  // track the oldest store: find the first entry (searching from the tail) whose data
  // is not yet valid; the entry just before it is the last consecutively data-valid store
  val validStoreVec = VecInit((0 until StoreQueueSize).map(i => !(allocated(i) && datavalid(i))))
  val storeNotValid = SqPtr(false.B, getFirstOne(validStoreVec, tailMask))
  val storeValidIndex = (storeNotValid - 1.U).value
  io.oldestStore.valid := allocated(ringBufferTailExtended.value) && datavalid(ringBufferTailExtended.value) && !commited(storeValidIndex)
  io.oldestStore.bits := uop(storeValidIndex).roqIdx

  // writeback up to 2 store insts to CDB
  // choose the first two valid store requests from deqPtr
  val storeWbSelVec = VecInit((0 until StoreQueueSize).map(i => allocated(i) && datavalid(i) && !writebacked(i)))
  val (storeWbValid, storeWbSel) = selectFirstTwo(storeWbSelVec, tailMask)

  (0 until StorePipelineWidth).map(i => {
    io.stout(i).bits.uop := uop(storeWbSel(i))
    io.stout(i).bits.uop.sqIdx := storeWbSel(i).asTypeOf(new SqPtr)
    io.stout(i).bits.uop.cf.exceptionVec := dataModule.io.rdata(storeWbSel(i)).exception.asBools
    io.stout(i).bits.data := dataModule.io.rdata(storeWbSel(i)).data
    io.stout(i).bits.redirectValid := false.B
    io.stout(i).bits.redirect := DontCare
    io.stout(i).bits.brUpdate := DontCare
    io.stout(i).bits.debug.isMMIO := dataModule.io.rdata(storeWbSel(i)).mmio
    io.stout(i).valid := storeWbSelVec(storeWbSel(i)) && storeWbValid(i)
    when(io.stout(i).fire()) {
      writebacked(storeWbSel(i)) := true.B
    }
    io.stout(i).bits.fflags := DontCare
  })

  // remove retired insts from sq, add retired store to sbuffer

  // move tailPtr
  // TailPtr slow recovery: recycle bubbles in the store queue
  // allocatedMask: dequeuePtr can advance to the next set bit
  val allocatedMask = VecInit((0 until StoreQueueSize).map(i => allocated(i) || !enqDeqMask(i)))
  // find the first one from deqPtr (ringBufferTail)
  val nextTail1 = getFirstOneWithFlag(allocatedMask, tailMask, ringBufferTailExtended.flag)
  val nextTail = Mux(Cat(allocatedMask).orR, nextTail1, ringBufferHeadExtended)
  ringBufferTailExtended := nextTail

  // TailPtr fast recovery
  val tailRecycle = VecInit(List(
    io.uncache.resp.fire() || io.sbuffer(0).fire(),
    io.sbuffer(1).fire()
  ))

  when(tailRecycle.asUInt.orR){
    ringBufferTailExtended := ringBufferTailExtended + PopCount(tailRecycle.asUInt)
  }
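  // Note on priority: both assignments to ringBufferTailExtended are active; by
  // Chisel last-connect semantics the fast-recovery `when` overrides the slow-path
  // value on cycles where entries actually retire, and the slow path (which skips
  // bubbles left by cancelled entries) applies otherwise.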

  // load forward query
  // check over all sq entries and forward data from the first matched store
  (0 until LoadPipelineWidth).map(i => {
    io.forward(i).forwardMask := 0.U(8.W).asBools
    io.forward(i).forwardData := DontCare

    // Compare ringBufferTail (deqPtr) and forward.sqIdx; there are two cases:
    // (1) if they have the same flag, we need to check range(tail, sqIdx)
    // (2) if they have different flags, we need to check range(tail, StoreQueueSize) and range(0, sqIdx)
    // Forward1: Mux(same_flag, range(tail, sqIdx), range(tail, StoreQueueSize))
    // Forward2: Mux(same_flag, 0.U,                range(0, sqIdx)            )
    // i.e. forward1 covers the target entries with the same flag bits and forward2 the rest

    val differentFlag = ringBufferTailExtended.flag =/= io.forward(i).sqIdx.flag
    val forwardMask = ((1.U((StoreQueueSize + 1).W)) << io.forward(i).sqIdx.value).asUInt - 1.U
    val storeWritebackedVec = WireInit(VecInit(Seq.fill(StoreQueueSize)(false.B)))
    for (j <- 0 until StoreQueueSize) {
      storeWritebackedVec(j) := datavalid(j) && allocated(j) // all datavalid terms need to be checked
    }
    val needForward1 = Mux(differentFlag, ~tailMask, tailMask ^ forwardMask) & storeWritebackedVec.asUInt
    val needForward2 = Mux(differentFlag, forwardMask, 0.U(StoreQueueSize.W)) & storeWritebackedVec.asUInt
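    // Worked example, assuming StoreQueueSize = 8, tail = 2, load sqIdx = 6, same flag:
    //   forwardMask = 0b00111111, tailMask = 0b00000011
    //   needForward1 = 0b00111100 (stores 2..5, the stores older than the load)
    //   needForward2 = 0          (no wrapped-around part)
    // before masking with storeWritebackedVec. With different flags, needForward1
    // covers tail..StoreQueueSize-1 and needForward2 covers 0..sqIdx-1.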

    XSDebug("" + i + " f1 %b f2 %b sqIdx %d pa %x\n", needForward1, needForward2, io.forward(i).sqIdx.asUInt, io.forward(i).paddr)

    // do real fwd query
    dataModule.io.forwardQuery(
      channel = i,
      paddr = io.forward(i).paddr,
      needForward1 = needForward1,
      needForward2 = needForward2
    )

    io.forward(i).forwardMask := dataModule.io.forward(i).forwardMask
    io.forward(i).forwardData := dataModule.io.forward(i).forwardData
  })

  // When a store is committed by the ROQ, mark it as committed
  // (committed entries are no longer affected by redirect)
  (0 until CommitWidth).map(i => {
    when(storeCommit(i)) {
      commited(mcommitIdx(i)) := true.B
      XSDebug("store commit %d: idx %d %x\n", i.U, mcommitIdx(i), uop(mcommitIdx(i)).cf.pc)
    }
  })

  // send committed, non-mmio stores at the tail to the sbuffer, up to 2 per cycle
  (0 until 2).map(i => {
    val ptr = (ringBufferTailExtended + i.U).value
    val mmio = dataModule.io.rdata(ptr).mmio
    io.sbuffer(i).valid := allocated(ptr) && commited(ptr) && !mmio
    io.sbuffer(i).bits.cmd  := MemoryOpConstants.M_XWR
    io.sbuffer(i).bits.addr := dataModule.io.rdata(ptr).paddr
    io.sbuffer(i).bits.data := dataModule.io.rdata(ptr).data
    io.sbuffer(i).bits.mask := dataModule.io.rdata(ptr).mask
    io.sbuffer(i).bits.meta          := DontCare
    io.sbuffer(i).bits.meta.tlb_miss := false.B
    io.sbuffer(i).bits.meta.uop      := DontCare
    io.sbuffer(i).bits.meta.mmio     := mmio
    io.sbuffer(i).bits.meta.mask     := dataModule.io.rdata(ptr).mask

    when(io.sbuffer(i).fire()) {
      allocated(ptr) := false.B
      XSDebug("sbuffer "+i+" fire: ptr %d\n", ptr)
    }
  })
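  // The entry is freed (allocated := false.B) as soon as the sbuffer accepts it;
  // the tail pointer then catches up through the fast/slow recovery logic above.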

  // Memory mapped IO / other uncached operations

  // set up the uncached memory access request;
  // mask / paddr / data can be read from sq.data
  val commitType = io.commits(0).bits.uop.ctrl.commitType
  io.uncache.req.valid := pending(ringBufferTail) && allocated(ringBufferTail) &&
    commitType === CommitType.STORE &&
    io.roqDeqPtr === uop(ringBufferTail).roqIdx &&
    !io.commits(0).bits.isWalk

  io.uncache.req.bits.cmd  := MemoryOpConstants.M_XWR
  io.uncache.req.bits.addr := dataModule.io.rdata(ringBufferTail).paddr
  io.uncache.req.bits.data := dataModule.io.rdata(ringBufferTail).data
  io.uncache.req.bits.mask := dataModule.io.rdata(ringBufferTail).mask

  io.uncache.req.bits.meta.id       := DontCare // TODO / FIXME
  io.uncache.req.bits.meta.vaddr    := DontCare
  io.uncache.req.bits.meta.paddr    := dataModule.io.rdata(ringBufferTail).paddr
  io.uncache.req.bits.meta.uop      := uop(ringBufferTail)
  io.uncache.req.bits.meta.mmio     := true.B // dataModule.io.rdata(ringBufferTail).mmio
  io.uncache.req.bits.meta.tlb_miss := false.B
  io.uncache.req.bits.meta.mask     := dataModule.io.rdata(ringBufferTail).mask
  io.uncache.req.bits.meta.replay   := false.B

  io.uncache.resp.ready := true.B

  when(io.uncache.req.fire()){
    pending(ringBufferTail) := false.B
  }

  when(io.uncache.resp.fire()){
    datavalid(ringBufferTail) := true.B // will be written back to the CDB in the next cycle
    // TODO: write back exception info
  }

  when(io.uncache.req.fire()){
    XSDebug("uncache req: pc %x addr %x data %x op %x mask %x\n",
      uop(ringBufferTail).cf.pc,
      io.uncache.req.bits.addr,
      io.uncache.req.bits.data,
      io.uncache.req.bits.cmd,
      io.uncache.req.bits.mask
    )
  }
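  // MMIO store lifecycle: the store waits in the queue with `pending` set until it
  // reaches the head of the ROQ (roqDeqPtr matches its roqIdx); the uncache request
  // clears `pending`, and the response sets `datavalid` so the store writes back
  // over stout like an ordinary store.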

  // Read vaddr for mem exception
  io.exceptionAddr.vaddr := dataModule.io.rdata(io.exceptionAddr.lsIdx.sqIdx.value).vaddr

  // misprediction recovery / exception redirect
  // invalidate sq entries using roqIdx
  val needCancel = Wire(Vec(StoreQueueSize, Bool()))
  for (i <- 0 until StoreQueueSize) {
    needCancel(i) := uop(i).roqIdx.needFlush(io.brqRedirect) && allocated(i) && !commited(i)
    when(needCancel(i)) {
      when(io.brqRedirect.bits.isReplay){
        // on replay, keep the entry allocated but clear its progress bits
        datavalid(i) := false.B
        writebacked(i) := false.B
        pending(i) := false.B
      }.otherwise{
        // on mispredict / exception, free the entry
        allocated(i) := false.B
      }
    }
  }
  when (io.brqRedirect.valid && io.brqRedirect.bits.isMisPred) {
    ringBufferHeadExtended := ringBufferHeadExtended - PopCount(needCancel)
  }
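  // Head fast recovery: on a mispredict the cancelled entries are the youngest ones
  // (all dispatched after the redirecting branch, hence contiguous at the head), so
  // the head can step back by PopCount(needCancel) in a single cycle.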

  // debug info
  XSDebug("head %d:%d tail %d:%d\n", ringBufferHeadExtended.flag, ringBufferHead, ringBufferTailExtended.flag, ringBufferTail)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when(flag) {
      XSDebug(false, true.B, name)
    }.otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  for (i <- 0 until StoreQueueSize) {
    if (i % 4 == 0) XSDebug("")
    XSDebug(false, true.B, "%x [%x] ", uop(i).cf.pc, dataModule.io.rdata(i).paddr)
    PrintFlag(allocated(i), "a")
    PrintFlag(allocated(i) && datavalid(i), "v")
    PrintFlag(allocated(i) && writebacked(i), "w")
    PrintFlag(allocated(i) && commited(i), "c")
    PrintFlag(allocated(i) && pending(i), "p")
    XSDebug(false, true.B, " ")
    if (i % 4 == 3 || i == StoreQueueSize - 1) XSDebug(false, true.B, "\n")
  }

}