/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import chisel3._
import chisel3.util._
import difftest._
import difftest.common.DifftestMem
import org.chipsalliance.cde.config.Parameters
import utility._
import utils._
import xiangshan._
import xiangshan.cache._
import xiangshan.cache.{DCacheLineIO, DCacheWordIO, MemoryOpConstants}
import xiangshan.backend._
import xiangshan.backend.rob.{RobLsqIO, RobPtr}
import xiangshan.backend.Bundles.{DynInst, MemExuOutput}
import xiangshan.backend.decode.isa.bitfield.{Riscv32BitInst, XSInstBitFields}
import xiangshan.backend.fu.FuConfig._
import xiangshan.backend.fu.FuType

class SqPtr(implicit p: Parameters) extends CircularQueuePtr[SqPtr](
  p => p(XSCoreParamsKey).StoreQueueSize
){
}

object SqPtr {
  def apply(f: Bool, v: UInt)(implicit p: Parameters): SqPtr = {
    val ptr = Wire(new SqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}
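
// A minimal usage sketch (illustrative only, assuming the CircularQueuePtr
// semantics from the utility package): the extra `flag` bit distinguishes
// pointers that alias the same entry after wrap-around, e.g.
//   val a = SqPtr(false.B, (StoreQueueSize - 1).U)
//   val b = a + 2.U  // wraps: b.flag === true.B, b.value === 1.U
//   isAfter(b, a)    // true: b points to a logically younger entry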

class SqEnqIO(implicit p: Parameters) extends MemBlockBundle {
  val canAccept = Output(Bool())
  val lqCanAccept = Input(Bool())
  val needAlloc = Vec(LSQEnqWidth, Input(Bool()))
  val req = Vec(LSQEnqWidth, Flipped(ValidIO(new DynInst)))
  val resp = Vec(LSQEnqWidth, Output(new SqPtr))
}

class DataBufferEntry(implicit p: Parameters) extends DCacheBundle {
  val addr   = UInt(PAddrBits.W)
  val vaddr  = UInt(VAddrBits.W)
  val data   = UInt(VLEN.W)
  val mask   = UInt((VLEN/8).W)
  val wline  = Bool()
  val sqPtr  = new SqPtr
  val prefetch = Bool()
  val vecValid = Bool()
}

class StoreExceptionBuffer(implicit p: Parameters) extends XSModule with HasCircularQueuePtrHelper {
  val io = IO(new Bundle() {
    val redirect = Flipped(ValidIO(new Redirect))
    val storeAddrIn = Vec(StorePipelineWidth + VecStorePipelineWidth, Flipped(ValidIO(new LsPipelineBundle())))
    val exceptionAddr = new ExceptionAddrIO
  })

  val req_valid = RegInit(false.B)
  val req = Reg(new LsPipelineBundle())

  // enqueue
  // S1:
  val s1_req = VecInit(io.storeAddrIn.map(_.bits))
  val s1_valid = VecInit(io.storeAddrIn.map(_.valid))

  // S2: delay 1 cycle
  val s2_req = RegNext(s1_req)
  val s2_valid = (0 until StorePipelineWidth + VecStorePipelineWidth).map(i =>
    RegNext(s1_valid(i)) &&
      !s2_req(i).uop.robIdx.needFlush(RegNext(io.redirect)) &&
      !s2_req(i).uop.robIdx.needFlush(io.redirect)
  )
  val s2_has_exception = s2_req.map(x => ExceptionNO.selectByFu(x.uop.exceptionVec, StaCfg).asUInt.orR)

  val s2_enqueue = Wire(Vec(StorePipelineWidth + VecStorePipelineWidth, Bool()))
  for (w <- 0 until StorePipelineWidth + VecStorePipelineWidth) {
    s2_enqueue(w) := s2_valid(w) && s2_has_exception(w)
  }

  when (req_valid && req.uop.robIdx.needFlush(io.redirect)) {
    req_valid := s2_enqueue.asUInt.orR
  }.elsewhen (s2_enqueue.asUInt.orR) {
    req_valid := true.B
  }

  def selectOldest[T <: LsPipelineBundle](valid: Seq[Bool], bits: Seq[T]): (Seq[Bool], Seq[T]) = {
    assert(valid.length == bits.length)
    if (valid.length == 0 || valid.length == 1) {
      (valid, bits)
    } else if (valid.length == 2) {
      val res = Seq.fill(2)(Wire(Valid(chiselTypeOf(bits(0)))))
      for (i <- res.indices) {
        res(i).valid := valid(i)
        res(i).bits := bits(i)
      }
      val oldest = Mux(valid(0) && valid(1),
        Mux(isAfter(bits(0).uop.robIdx, bits(1).uop.robIdx) ||
          (isNotBefore(bits(0).uop.robIdx, bits(1).uop.robIdx) && bits(0).uop.uopIdx > bits(1).uop.uopIdx), res(1), res(0)),
        Mux(valid(0) && !valid(1), res(0), res(1)))
      (Seq(oldest.valid), Seq(oldest.bits))
    } else {
      val left = selectOldest(valid.take(valid.length / 2), bits.take(bits.length / 2))
      val right = selectOldest(valid.takeRight(valid.length - (valid.length / 2)), bits.takeRight(bits.length - (bits.length / 2)))
      selectOldest(left._1 ++ right._1, left._2 ++ right._2)
    }
  }
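
  // Illustrative note (comment only, not hardware): for four inputs the
  // recursion forms a balanced reduction tree, e.g.
  //   selectOldest(Seq(v0, v1, v2, v3), Seq(b0, b1, b2, b3))
  // first reduces (v0, v1) and (v2, v3) pairwise, then reduces the two
  // winners, yielding a single (valid, bits) pair for the oldest valid store,
  // where "older" means a smaller robIdx, with ties broken by a smaller uopIdx.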

  val reqSel = selectOldest(s2_enqueue, s2_req)

  when (req_valid) {
    req := Mux(
      reqSel._1(0) && (isAfter(req.uop.robIdx, reqSel._2(0).uop.robIdx) || (isNotBefore(req.uop.robIdx, reqSel._2(0).uop.robIdx) && req.uop.uopIdx > reqSel._2(0).uop.uopIdx)),
      reqSel._2(0),
      req)
  } .elsewhen (s2_enqueue.asUInt.orR) {
    req := reqSel._2(0)
  }

  io.exceptionAddr.vaddr  := req.vaddr
  io.exceptionAddr.gpaddr := req.gpaddr
  io.exceptionAddr.vstart := req.uop.vpu.vstart
  io.exceptionAddr.vl     := req.uop.vpu.vl
}

// Store Queue
class StoreQueue(implicit p: Parameters) extends XSModule
  with HasDCacheParameters
  with HasCircularQueuePtrHelper
  with HasPerfEvents
  with HasVLSUParameters {
  val io = IO(new Bundle() {
    val hartId = Input(UInt(hartIdLen.W))
    val enq = new SqEnqIO
    val brqRedirect = Flipped(ValidIO(new Redirect))
    val vecFeedback = Vec(VecLoadPipelineWidth, Flipped(ValidIO(new FeedbackToLsqIO)))
    val storeAddrIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle))) // store addr, data is not included
    val storeAddrInRe = Vec(StorePipelineWidth, Input(new LsPipelineBundle())) // store addr replenished with mmio and exception info, one cycle later
    val storeDataIn = Vec(StorePipelineWidth, Flipped(Valid(new MemExuOutput(isVector = true)))) // store data, sent to sq from rs
    val storeMaskIn = Vec(StorePipelineWidth, Flipped(Valid(new StoreMaskBundle))) // store mask, sent to sq from rs
    val sbuffer = Vec(EnsbufferWidth, Decoupled(new DCacheWordReqWithVaddrAndPfFlag)) // write committed store to sbuffer
    val sbufferVecDifftestInfo = Vec(EnsbufferWidth, Decoupled(new DynInst)) // info needed by vector store difftest, written alongside committed stores to sbuffer
    val uncacheOutstanding = Input(Bool())
    val mmioStout = DecoupledIO(new MemExuOutput) // writeback uncached store
    val vecmmioStout = DecoupledIO(new MemExuOutput(isVector = true))
    val forward = Vec(LoadPipelineWidth, Flipped(new PipeLoadForwardQueryIO))
    // TODO: scommit is only for scalar store
    val rob = Flipped(new RobLsqIO)
    val uncache = new UncacheWordIO
    // val refill = Flipped(Valid(new DCacheLineReq ))
    val exceptionAddr = new ExceptionAddrIO
    val sqEmpty = Output(Bool())
    val stAddrReadySqPtr = Output(new SqPtr)
    val stAddrReadyVec = Output(Vec(StoreQueueSize, Bool()))
    val stDataReadySqPtr = Output(new SqPtr)
    val stDataReadyVec = Output(Vec(StoreQueueSize, Bool()))
    val stIssuePtr = Output(new SqPtr)
    val sqDeqPtr = Output(new SqPtr)
    val sqFull = Output(Bool())
    val sqCancelCnt = Output(UInt(log2Up(StoreQueueSize + 1).W))
    val sqDeq = Output(UInt(log2Ceil(EnsbufferWidth + 1).W))
    val force_write = Output(Bool())
  })

  println(s"StoreQueue: size: $StoreQueueSize")

  // data modules
  val uop = Reg(Vec(StoreQueueSize, new DynInst))
  // val data = Reg(Vec(StoreQueueSize, new LsqEntry))
  val dataModule = Module(new SQDataModule(
    numEntries = StoreQueueSize,
    numRead = EnsbufferWidth,
    numWrite = StorePipelineWidth,
    numForward = LoadPipelineWidth
  ))
  dataModule.io := DontCare
  val paddrModule = Module(new SQAddrModule(
    dataWidth = PAddrBits,
    numEntries = StoreQueueSize,
    numRead = EnsbufferWidth,
    numWrite = StorePipelineWidth,
    numForward = LoadPipelineWidth
  ))
  paddrModule.io := DontCare
  val vaddrModule = Module(new SQAddrModule(
    dataWidth = VAddrBits,
    numEntries = StoreQueueSize,
    numRead = EnsbufferWidth, // sbuffer; badvaddr will be sent from exceptionBuffer
    numWrite = StorePipelineWidth,
    numForward = LoadPipelineWidth
  ))
  vaddrModule.io := DontCare
  val dataBuffer = Module(new DatamoduleResultBuffer(new DataBufferEntry))
  val difftestBuffer = if (env.EnableDifftest) Some(Module(new DatamoduleResultBuffer(new DynInst))) else None
  val exceptionBuffer = Module(new StoreExceptionBuffer)
  exceptionBuffer.io.redirect := io.brqRedirect
  exceptionBuffer.io.exceptionAddr.isStore := DontCare
  // vlsu exception!
  for (i <- 0 until VecStorePipelineWidth) {
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth + i).valid               := io.vecFeedback(i).valid && io.vecFeedback(i).bits.feedback(VecFeedbacks.FLUSH) // have exception
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth + i).bits                := DontCare
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth + i).bits.vaddr          := io.vecFeedback(i).bits.vaddr
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth + i).bits.uop.uopIdx     := io.vecFeedback(i).bits.uopidx
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth + i).bits.uop.robIdx     := io.vecFeedback(i).bits.robidx
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth + i).bits.uop.vpu.vstart := io.vecFeedback(i).bits.vstart
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth + i).bits.uop.vpu.vl     := io.vecFeedback(i).bits.vl
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth + i).bits.uop.exceptionVec := io.vecFeedback(i).bits.exceptionVec
  }


  val debug_paddr = Reg(Vec(StoreQueueSize, UInt((PAddrBits).W)))
  val debug_vaddr = Reg(Vec(StoreQueueSize, UInt((VAddrBits).W)))
  val debug_data = Reg(Vec(StoreQueueSize, UInt((XLEN).W)))

  // state & misc
  val allocated = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // sq entry has been allocated
  val addrvalid = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // non-mmio addr is valid
  val datavalid = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // non-mmio data is valid
  val allvalid  = VecInit((0 until StoreQueueSize).map(i => addrvalid(i) && datavalid(i))) // non-mmio data & addr is valid
  val committed = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // inst has been committed by rob
  val pending = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // mmio pending: inst is an mmio inst, it will not be executed until it reaches the end of rob
  val mmio = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // mmio: inst is an mmio inst
  val atomic = RegInit(VecInit(List.fill(StoreQueueSize)(false.B)))
  val prefetch = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // need prefetch when committing this store to sbuffer?
  val isVec = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // vector store instruction
  //val vec_lastuop = Reg(Vec(StoreQueueSize, Bool())) // last uop of vector store instruction
  val vecMbCommit = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // vector store committed from merge buffer to rob
  val vecDataValid = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // vector store needs to write to sbuffer
  // val vec_robCommit = Reg(Vec(StoreQueueSize, Bool())) // vector store committed by rob
  // val vec_secondInv = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // Vector unit-stride, second entry is invalid

  // ptr
  val enqPtrExt = RegInit(VecInit((0 until io.enq.req.length).map(_.U.asTypeOf(new SqPtr))))
  val rdataPtrExt = RegInit(VecInit((0 until EnsbufferWidth).map(_.U.asTypeOf(new SqPtr))))
  val deqPtrExt = RegInit(VecInit((0 until EnsbufferWidth).map(_.U.asTypeOf(new SqPtr))))
  val cmtPtrExt = RegInit(VecInit((0 until CommitWidth).map(_.U.asTypeOf(new SqPtr))))
  val addrReadyPtrExt = RegInit(0.U.asTypeOf(new SqPtr))
  val dataReadyPtrExt = RegInit(0.U.asTypeOf(new SqPtr))

  val enqPtr = enqPtrExt(0).value
  val deqPtr = deqPtrExt(0).value
  val cmtPtr = cmtPtrExt(0).value

  val validCount = distanceBetween(enqPtrExt(0), deqPtrExt(0))
  val allowEnqueue = validCount <= (StoreQueueSize - LSQStEnqWidth).U

  val deqMask = UIntToMask(deqPtr, StoreQueueSize)
  val enqMask = UIntToMask(enqPtr, StoreQueueSize)

  // TODO: count commit numbers for scalar / vector store separately
  val scalarCommitCount = RegInit(0.U(log2Ceil(StoreQueueSize + 1).W))
  val scalarCommitted = WireInit(0.U(log2Ceil(CommitWidth + 1).W))
  val vecCommitted = WireInit(0.U(log2Ceil(CommitWidth + 1).W))
  val commitCount = WireInit(0.U(log2Ceil(CommitWidth + 1).W))
  val scommit = RegNext(io.rob.scommit)

  scalarCommitCount := scalarCommitCount + scommit - scalarCommitted

  // store can be committed by ROB
  io.rob.mmio := DontCare
  io.rob.uop := DontCare

  // Read dataModule
  assert(EnsbufferWidth <= 2)
  // the rdataPtrExtNext and rdataPtrExtNext+1 entries will be read from dataModule
  val rdataPtrExtNext = WireInit(Mux(dataBuffer.io.enq(1).fire,
    VecInit(rdataPtrExt.map(_ + 2.U)),
    Mux(dataBuffer.io.enq(0).fire || io.mmioStout.fire || io.vecmmioStout.fire,
      VecInit(rdataPtrExt.map(_ + 1.U)),
      rdataPtrExt
    )
  ))

  // deqPtrExtNext traces which inst is about to leave store queue
  //
  // io.sbuffer(i).fire is RegNexted, as sbuffer data write takes 2 cycles.
  // Before data write finishes, sbuffer is unable to provide store to load
  // forward data. As a workaround, deqPtrExt and allocated flag update
  // is delayed so that load can get the right data from store queue.
  //
  // Modify deqPtrExtNext and io.sqDeq with care!
  val deqPtrExtNext = Mux(RegNext(io.sbuffer(1).fire),
    VecInit(deqPtrExt.map(_ + 2.U)),
    Mux((RegNext(io.sbuffer(0).fire)) || io.mmioStout.fire || io.vecmmioStout.fire,
      VecInit(deqPtrExt.map(_ + 1.U)),
      deqPtrExt
    )
  )
  io.sqDeq := RegNext(Mux(RegNext(io.sbuffer(1).fire), 2.U,
    Mux((RegNext(io.sbuffer(0).fire)) || io.mmioStout.fire || io.vecmmioStout.fire, 1.U, 0.U)
  ))
  assert(!RegNext(RegNext(io.sbuffer(0).fire) && (io.mmioStout.fire || io.vecmmioStout.fire)))
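
  // Timing sketch (illustrative): if a store fires to sbuffer at cycle T,
  //   T  : io.sbuffer(i).fire, sbuffer data write starts
  //   T+1: RegNext(io.sbuffer(i).fire) -> deqPtrExt advances, allocated cleared
  // so during cycle T the entry can still answer store-to-load forwarding.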

  for (i <- 0 until EnsbufferWidth) {
    dataModule.io.raddr(i) := rdataPtrExtNext(i).value
    paddrModule.io.raddr(i) := rdataPtrExtNext(i).value
    vaddrModule.io.raddr(i) := rdataPtrExtNext(i).value
  }

  /**
    * Enqueue at dispatch
    *
    * Currently, StoreQueue only allows enqueue when #emptyEntries > EnqWidth
    */
  io.enq.canAccept := allowEnqueue
  val canEnqueue = io.enq.req.map(_.valid)
  val enqCancel = io.enq.req.map(_.bits.robIdx.needFlush(io.brqRedirect))
  val vStoreFlow = io.enq.req.map(_.bits.numLsElem)
  val validVStoreFlow = vStoreFlow.zipWithIndex.map{case (vStoreFlowNumItem, index) => Mux(!RegNext(io.brqRedirect.valid) && io.enq.canAccept && io.enq.lqCanAccept && canEnqueue(index), vStoreFlowNumItem, 0.U)}
  val validVStoreOffset = vStoreFlow.zip(io.enq.needAlloc).map{case (flow, needAllocItem) => Mux(needAllocItem, flow, 0.U)}
  val validVStoreOffsetRShift = 0.U +: validVStoreOffset.take(vStoreFlow.length - 1)

  for (i <- 0 until io.enq.req.length) {
    val sqIdx = enqPtrExt(0) + validVStoreOffsetRShift.take(i + 1).reduce(_ + _)
    val index = io.enq.req(i).bits.sqIdx
    val enqInstr = io.enq.req(i).bits.instr.asTypeOf(new XSInstBitFields)
    when (canEnqueue(i) && !enqCancel(i)) {
      for (j <- 0 until VecMemDispatchMaxNumber) {
        when (j.U < validVStoreOffset(i)) {
          uop((index + j.U).value) := io.enq.req(i).bits
          // NOTE: the index will be used on replay
          uop((index + j.U).value).sqIdx := sqIdx + j.U
          allocated((index + j.U).value) := true.B
          datavalid((index + j.U).value) := false.B
          addrvalid((index + j.U).value) := false.B
          committed((index + j.U).value) := false.B
          pending((index + j.U).value) := false.B
          prefetch((index + j.U).value) := false.B
          mmio((index + j.U).value) := false.B
          isVec((index + j.U).value) := enqInstr.isVecStore // check vector store by the encoding of inst
          vecMbCommit((index + j.U).value) := false.B
          vecDataValid((index + j.U).value) := false.B
          XSError(!io.enq.canAccept || !io.enq.lqCanAccept, s"must accept $i\n")
          XSError(index.value =/= sqIdx.value, s"must be the same entry $i\n")
        }
      }
    }
    io.enq.resp(i) := sqIdx
  }
  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n")
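
  // Worked example (illustrative): with two enq ports and numLsElem = (2, 1),
  //   validVStoreOffsetRShift = Seq(0.U, 2.U)
  //   port 0 allocates entries enqPtr+0 and enqPtr+1; port 1 allocates enqPtr+2
  // i.e. each port's base sqIdx is enqPtr plus the element counts of all
  // earlier ports.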

  /**
    * Update addr/dataReadyPtr when issue from rs
    */
  // update issuePtr
  val IssuePtrMoveStride = 4
  require(IssuePtrMoveStride >= 2)

  val addrReadyLookupVec = (0 until IssuePtrMoveStride).map(addrReadyPtrExt + _.U)
  val addrReadyLookup = addrReadyLookupVec.map(ptr => allocated(ptr.value) &&
    (mmio(ptr.value) || addrvalid(ptr.value) || vecMbCommit(ptr.value)) &&
    ptr =/= enqPtrExt(0))
  val nextAddrReadyPtr = addrReadyPtrExt + PriorityEncoder(VecInit(addrReadyLookup.map(!_) :+ true.B))
  addrReadyPtrExt := nextAddrReadyPtr
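
  // Sketch of the pointer advance (illustrative): with IssuePtrMoveStride = 4
  // the queue checks entries ptr+0 .. ptr+3; PriorityEncoder over the negated
  // ready bits (with a trailing true) counts the leading ready entries, e.g.
  //   addrReadyLookup = (1, 1, 0, 1) -> advance by 2
  //   addrReadyLookup = (1, 1, 1, 1) -> advance by 4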

  (0 until StoreQueueSize).map(i => {
    io.stAddrReadyVec(i) := RegNext(allocated(i) && (mmio(i) || addrvalid(i)))
  })

  when (io.brqRedirect.valid) {
    addrReadyPtrExt := Mux(
      isAfter(cmtPtrExt(0), deqPtrExt(0)),
      cmtPtrExt(0),
      deqPtrExtNext(0) // for mmio insts, deqPtr may be ahead of cmtPtr
    )
  }

  io.stAddrReadySqPtr := addrReadyPtrExt

  // update dataReadyPtr
  val dataReadyLookupVec = (0 until IssuePtrMoveStride).map(dataReadyPtrExt + _.U)
  val dataReadyLookup = dataReadyLookupVec.map(ptr => allocated(ptr.value) &&
    (mmio(ptr.value) || datavalid(ptr.value) || vecMbCommit(ptr.value)) &&
    ptr =/= enqPtrExt(0))
  val nextDataReadyPtr = dataReadyPtrExt + PriorityEncoder(VecInit(dataReadyLookup.map(!_) :+ true.B))
  dataReadyPtrExt := nextDataReadyPtr

  (0 until StoreQueueSize).map(i => {
    io.stDataReadyVec(i) := RegNext(allocated(i) && (mmio(i) || datavalid(i)))
  })

  when (io.brqRedirect.valid) {
    dataReadyPtrExt := Mux(
      isAfter(cmtPtrExt(0), deqPtrExt(0)),
      cmtPtrExt(0),
      deqPtrExtNext(0) // for mmio insts, deqPtr may be ahead of cmtPtr
    )
  }

  io.stDataReadySqPtr := dataReadyPtrExt
  io.stIssuePtr := enqPtrExt(0)
  io.sqDeqPtr := deqPtrExt(0)

  /**
    * Writeback store from store units
    *
    * Most store instructions write back to the regfile in the previous cycle.
    * However,
    *   (1) For an mmio instruction with exceptions, we need to mark it as addrvalid
    * (in this way it will trigger an exception when it reaches ROB's head)
    * instead of pending, to avoid sending it to lower levels.
    *   (2) For an mmio instruction without exceptions, we mark it as pending.
    * When the instruction reaches ROB's head, StoreQueue sends it to uncache channel.
    * Upon receiving the response, StoreQueue writes back the instruction
    * through arbiter with store units. It will later commit as normal.
    */

  // Write addr to sq
  for (i <- 0 until StorePipelineWidth) {
    paddrModule.io.wen(i) := false.B
    vaddrModule.io.wen(i) := false.B
    dataModule.io.mask.wen(i) := false.B
    val stWbIndex = io.storeAddrIn(i).bits.uop.sqIdx.value
    exceptionBuffer.io.storeAddrIn(i).valid := io.storeAddrIn(i).fire && !io.storeAddrIn(i).bits.miss && !io.storeAddrIn(i).bits.isvec
    exceptionBuffer.io.storeAddrIn(i).bits := io.storeAddrIn(i).bits

    when (io.storeAddrIn(i).fire) {
      val addr_valid = !io.storeAddrIn(i).bits.miss
      addrvalid(stWbIndex) := addr_valid //!io.storeAddrIn(i).bits.mmio
      // pending(stWbIndex) := io.storeAddrIn(i).bits.mmio

      paddrModule.io.waddr(i) := stWbIndex
      paddrModule.io.wdata(i) := io.storeAddrIn(i).bits.paddr
      paddrModule.io.wmask(i) := io.storeAddrIn(i).bits.mask
      paddrModule.io.wlineflag(i) := io.storeAddrIn(i).bits.wlineflag
      paddrModule.io.wen(i) := true.B

      vaddrModule.io.waddr(i) := stWbIndex
      vaddrModule.io.wdata(i) := io.storeAddrIn(i).bits.vaddr
      vaddrModule.io.wmask(i) := io.storeAddrIn(i).bits.mask
      vaddrModule.io.wlineflag(i) := io.storeAddrIn(i).bits.wlineflag
      vaddrModule.io.wen(i) := true.B

      debug_paddr(paddrModule.io.waddr(i)) := paddrModule.io.wdata(i)

      // mmio(stWbIndex) := io.storeAddrIn(i).bits.mmio

      uop(stWbIndex) := io.storeAddrIn(i).bits.uop
      uop(stWbIndex).debugInfo := io.storeAddrIn(i).bits.uop.debugInfo

      vecDataValid(stWbIndex) := io.storeAddrIn(i).bits.isvec

      XSInfo("store addr write to sq idx %d pc 0x%x miss:%d vaddr %x paddr %x mmio %x isvec %x\n",
        io.storeAddrIn(i).bits.uop.sqIdx.value,
        io.storeAddrIn(i).bits.uop.pc,
        io.storeAddrIn(i).bits.miss,
        io.storeAddrIn(i).bits.vaddr,
        io.storeAddrIn(i).bits.paddr,
        io.storeAddrIn(i).bits.mmio,
        io.storeAddrIn(i).bits.isvec
      )
    }

    // re-replenish mmio, since the pma/pmp check returns mmio one cycle later
    val storeAddrInFireReg = RegNext(io.storeAddrIn(i).fire && !io.storeAddrIn(i).bits.miss)
    val stWbIndexReg = RegNext(stWbIndex)
    when (storeAddrInFireReg) {
      pending(stWbIndexReg) := io.storeAddrInRe(i).mmio
      mmio(stWbIndexReg) := io.storeAddrInRe(i).mmio
      atomic(stWbIndexReg) := io.storeAddrInRe(i).atomic
    }
    // dcache miss info (one cycle later than storeIn)
    // if dcache reports a miss in the sta pipeline, this store will trigger a prefetch when committing to sbuffer (if EnableAtCommitMissTrigger)
    when (storeAddrInFireReg) {
      prefetch(stWbIndexReg) := io.storeAddrInRe(i).miss
    }

    when(vaddrModule.io.wen(i)){
      debug_vaddr(vaddrModule.io.waddr(i)) := vaddrModule.io.wdata(i)
    }
  }

  // Write data to sq
  // Now store data pipeline is actually 2 stages
  for (i <- 0 until StorePipelineWidth) {
    dataModule.io.data.wen(i) := false.B
    val stWbIndex = io.storeDataIn(i).bits.uop.sqIdx.value
    val isVec     = FuType.isVStore(io.storeDataIn(i).bits.uop.fuType)
    // sq data write takes 2 cycles:
    // sq data write s0
    when (io.storeDataIn(i).fire) {
      // send data write req to data module
      dataModule.io.data.waddr(i) := stWbIndex
      dataModule.io.data.wdata(i) := Mux(io.storeDataIn(i).bits.uop.fuOpType === LSUOpType.cbo_zero,
        0.U,
        Mux(isVec,
          io.storeDataIn(i).bits.data,
          genVWdata(io.storeDataIn(i).bits.data, io.storeDataIn(i).bits.uop.fuOpType(2,0)))
      )
      dataModule.io.data.wen(i) := true.B

      debug_data(dataModule.io.data.waddr(i)) := dataModule.io.data.wdata(i)

      XSInfo("store data write to sq idx %d pc 0x%x data %x -> %x\n",
        io.storeDataIn(i).bits.uop.sqIdx.value,
        io.storeDataIn(i).bits.uop.pc,
        io.storeDataIn(i).bits.data,
        dataModule.io.data.wdata(i)
      )
    }
    // sq data write s1
    when (
      RegNext(io.storeDataIn(i).fire)
      // && !RegNext(io.storeDataIn(i).bits.uop).robIdx.needFlush(io.brqRedirect)
    ) {
      datavalid(RegNext(stWbIndex)) := true.B
    }
  }

  // Write mask to sq
  for (i <- 0 until StorePipelineWidth) {
    // sq mask write s0
    when (io.storeMaskIn(i).fire) {
      // send mask write req to data module
      dataModule.io.mask.waddr(i) := io.storeMaskIn(i).bits.sqIdx.value
      dataModule.io.mask.wdata(i) := io.storeMaskIn(i).bits.mask
      dataModule.io.mask.wen(i) := true.B
    }
  }

  /**
    * load forward query
    *
    * Check store queue for instructions that are older than the load.
    * The response will be valid at the next cycle after req.
    */
  // check over all sq entries and forward data from the first matched store
  for (i <- 0 until LoadPipelineWidth) {
    // Compare deqPtr and forward.sqIdx, we have two cases:
    // (1) if they have the same flag, we need to check range(tail, sqIdx)
    // (2) if they have different flags, we need to check range(tail, StoreQueueSize) and range(0, sqIdx)
    // Forward1: Mux(same_flag, range(tail, sqIdx), range(tail, StoreQueueSize))
    // Forward2: Mux(same_flag, 0.U,                range(0, sqIdx)           )
    // i.e. forward1 is the target entries with the same flag bits and forward2 otherwise
    val differentFlag = deqPtrExt(0).flag =/= io.forward(i).sqIdx.flag
    val forwardMask = io.forward(i).sqIdxMask
    // all addrvalid terms need to be checked
    // Real valid: all scalar stores, and vector stores with (!inactive && !secondInvalid)
    val addrRealValidVec = WireInit(VecInit((0 until StoreQueueSize).map(j => addrvalid(j) && allocated(j))))
    // vector store will consider all inactive || secondInvalid flows as valid
    val addrValidVec = WireInit(VecInit((0 until StoreQueueSize).map(j => addrvalid(j) && allocated(j))))
    val dataValidVec = WireInit(VecInit((0 until StoreQueueSize).map(j => datavalid(j))))
    val allValidVec  = WireInit(VecInit((0 until StoreQueueSize).map(j => addrvalid(j) && datavalid(j) && allocated(j))))

    val lfstEnable = Constantin.createRecord("LFSTEnable", LFSTEnable)
    val storeSetHitVec = Mux(lfstEnable,
      WireInit(VecInit((0 until StoreQueueSize).map(j => io.forward(i).uop.loadWaitBit && uop(j).robIdx === io.forward(i).uop.waitForRobIdx))),
      WireInit(VecInit((0 until StoreQueueSize).map(j => uop(j).storeSetHit && uop(j).ssid === io.forward(i).uop.ssid)))
    )

    val forwardMask1 = Mux(differentFlag, ~deqMask, deqMask ^ forwardMask)
    val forwardMask2 = Mux(differentFlag, forwardMask, 0.U(StoreQueueSize.W))
    val canForward1 = forwardMask1 & allValidVec.asUInt
    val canForward2 = forwardMask2 & allValidVec.asUInt
    val needForward = Mux(differentFlag, ~deqMask | forwardMask, deqMask ^ forwardMask)
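
    // Worked example (illustrative, 8-entry queue): deqPtr = 2, load sqIdx = 6,
    // same flag:
    //   deqMask      = 0b00000011, forwardMask = 0b00111111
    //   forwardMask1 = deqMask ^ forwardMask = 0b00111100 -> entries [2, 6)
    //   forwardMask2 = 0                                   (no wrapped range)
    // with different flags: forwardMask1 = ~deqMask -> [2, 8), and
    // forwardMask2 = forwardMask -> [0, 6).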

    XSDebug(p"$i f1 ${Binary(canForward1)} f2 ${Binary(canForward2)} " +
      p"sqIdx ${io.forward(i).sqIdx} pa ${Hexadecimal(io.forward(i).paddr)}\n"
    )

    // do real fwd query (cam lookup in load_s1)
    dataModule.io.needForward(i)(0) := canForward1 & vaddrModule.io.forwardMmask(i).asUInt
    dataModule.io.needForward(i)(1) := canForward2 & vaddrModule.io.forwardMmask(i).asUInt

    vaddrModule.io.forwardMdata(i) := io.forward(i).vaddr
    vaddrModule.io.forwardDataMask(i) := io.forward(i).mask
    paddrModule.io.forwardMdata(i) := io.forward(i).paddr
    paddrModule.io.forwardDataMask(i) := io.forward(i).mask

    // vaddr cam result does not equal paddr cam result
    // replay needed
    // val vpmaskNotEqual = ((paddrModule.io.forwardMmask(i).asUInt ^ vaddrModule.io.forwardMmask(i).asUInt) & needForward) =/= 0.U
    // val vaddrMatchFailed = vpmaskNotEqual && io.forward(i).valid
    val vpmaskNotEqual = (
      (RegNext(paddrModule.io.forwardMmask(i).asUInt) ^ RegNext(vaddrModule.io.forwardMmask(i).asUInt)) &
      RegNext(needForward) &
      RegNext(addrRealValidVec.asUInt)
    ) =/= 0.U
    val vaddrMatchFailed = vpmaskNotEqual && RegNext(io.forward(i).valid)
    when (vaddrMatchFailed) {
      XSInfo("vaddrMatchFailed: pc %x pmask %x vmask %x\n",
        RegNext(io.forward(i).uop.pc),
        RegNext(needForward & paddrModule.io.forwardMmask(i).asUInt),
        RegNext(needForward & vaddrModule.io.forwardMmask(i).asUInt)
      )
    }
    XSPerfAccumulate("vaddr_match_failed", vpmaskNotEqual)
    XSPerfAccumulate("vaddr_match_really_failed", vaddrMatchFailed)

    // Fast forward mask will be generated immediately (load_s1)
    io.forward(i).forwardMaskFast := dataModule.io.forwardMaskFast(i)

    // Forward result will be generated 1 cycle later (load_s2)
    io.forward(i).forwardMask := dataModule.io.forwardMask(i)
    io.forward(i).forwardData := dataModule.io.forwardData(i)
    // If addr matches but data is not ready, mark it as dataInvalid
    // load_s1: generate dataInvalid in load_s1 to set fastUop
    val dataInvalidMask1 = (addrValidVec.asUInt & ~dataValidVec.asUInt & vaddrModule.io.forwardMmask(i).asUInt & forwardMask1.asUInt)
    val dataInvalidMask2 = (addrValidVec.asUInt & ~dataValidVec.asUInt & vaddrModule.io.forwardMmask(i).asUInt & forwardMask2.asUInt)
    val dataInvalidMask = dataInvalidMask1 | dataInvalidMask2
    io.forward(i).dataInvalidFast := dataInvalidMask.orR

    // make chisel happy
    val dataInvalidMask1Reg = Wire(UInt(StoreQueueSize.W))
    dataInvalidMask1Reg := RegNext(dataInvalidMask1)
    // make chisel happy
    val dataInvalidMask2Reg = Wire(UInt(StoreQueueSize.W))
    dataInvalidMask2Reg := RegNext(dataInvalidMask2)
    val dataInvalidMaskReg = dataInvalidMask1Reg | dataInvalidMask2Reg

    // If SSID matches but the address is not ready, mark it as addrInvalid
    // load_s2: generate addrInvalid
    val addrInvalidMask1 = (~addrValidVec.asUInt & storeSetHitVec.asUInt & forwardMask1.asUInt)
    val addrInvalidMask2 = (~addrValidVec.asUInt & storeSetHitVec.asUInt & forwardMask2.asUInt)
    // make chisel happy
    val addrInvalidMask1Reg = Wire(UInt(StoreQueueSize.W))
    addrInvalidMask1Reg := RegNext(addrInvalidMask1)
    // make chisel happy
    val addrInvalidMask2Reg = Wire(UInt(StoreQueueSize.W))
    addrInvalidMask2Reg := RegNext(addrInvalidMask2)
    val addrInvalidMaskReg = addrInvalidMask1Reg | addrInvalidMask2Reg

    // load_s2
    io.forward(i).dataInvalid := RegNext(io.forward(i).dataInvalidFast)
    // check if vaddr forward mismatched
    io.forward(i).matchInvalid := vaddrMatchFailed

    // data invalid sq index
    // check whether there is a false fail
    // check flag
    val s2_differentFlag = RegNext(differentFlag)
    val s2_enqPtrExt = RegNext(enqPtrExt(0))
    val s2_deqPtrExt = RegNext(deqPtrExt(0))

    // addr invalid sq index
    // make chisel happy
    val addrInvalidMaskRegWire = Wire(UInt(StoreQueueSize.W))
    addrInvalidMaskRegWire := addrInvalidMaskReg
    val addrInvalidFlag = addrInvalidMaskRegWire.orR
    val hasInvalidAddr = (~addrValidVec.asUInt & needForward).orR

    val addrInvalidSqIdx1 = OHToUInt(Reverse(PriorityEncoderOH(Reverse(addrInvalidMask1Reg))))
    val addrInvalidSqIdx2 = OHToUInt(Reverse(PriorityEncoderOH(Reverse(addrInvalidMask2Reg))))
    val addrInvalidSqIdx = Mux(addrInvalidMask2Reg.orR, addrInvalidSqIdx2, addrInvalidSqIdx1)
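
    // Note (illustrative): Reverse + PriorityEncoderOH + Reverse picks the
    // most-significant set bit of each mask, i.e. the youngest blocking store
    // in each range; mask2 (the wrapped segment) holds the younger entries,
    // so it is preferred when non-empty.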

    // store-set content management
    //                +-----------------------+
    //                | Search a SSID for the |
    //                |    load operation     |
    //                +-----------------------+
    //                           |
    //                           V
    //                 +-------------------+
    //                 | load wait strict? |
    //                 +-------------------+
    //                           |
    //                           V
    //               +----------------------+
    //            Set|                      |Clean
    //               V                      V
    //  +------------------------+   +------------------------------+
    //  | Waiting for all older  |   | Wait until the corresponding |
    //  |   store operations     |   | older store operations       |
    //  +------------------------+   +------------------------------+

    when (RegNext(io.forward(i).uop.loadWaitStrict)) {
      io.forward(i).addrInvalidSqIdx := RegNext(io.forward(i).uop.sqIdx - 1.U)
    } .elsewhen (addrInvalidFlag) {
      io.forward(i).addrInvalidSqIdx.flag := Mux(!s2_differentFlag || addrInvalidSqIdx >= s2_deqPtrExt.value, s2_deqPtrExt.flag, s2_enqPtrExt.flag)
      io.forward(i).addrInvalidSqIdx.value := addrInvalidSqIdx
    } .otherwise {
      // maybe the store inst has been written to sbuffer already
      io.forward(i).addrInvalidSqIdx := RegNext(io.forward(i).uop.sqIdx)
    }
    io.forward(i).addrInvalid := Mux(RegNext(io.forward(i).uop.loadWaitStrict), RegNext(hasInvalidAddr), addrInvalidFlag)

    // data invalid sq index
    // make chisel happy
    val dataInvalidMaskRegWire = Wire(UInt(StoreQueueSize.W))
    dataInvalidMaskRegWire := dataInvalidMaskReg
    val dataInvalidFlag = dataInvalidMaskRegWire.orR

    val dataInvalidSqIdx1 = OHToUInt(Reverse(PriorityEncoderOH(Reverse(dataInvalidMask1Reg))))
    val dataInvalidSqIdx2 = OHToUInt(Reverse(PriorityEncoderOH(Reverse(dataInvalidMask2Reg))))
    val dataInvalidSqIdx = Mux(dataInvalidMask2Reg.orR, dataInvalidSqIdx2, dataInvalidSqIdx1)

    when (dataInvalidFlag) {
      io.forward(i).dataInvalidSqIdx.flag := Mux(!s2_differentFlag || dataInvalidSqIdx >= s2_deqPtrExt.value, s2_deqPtrExt.flag, s2_enqPtrExt.flag)
      io.forward(i).dataInvalidSqIdx.value := dataInvalidSqIdx
    } .otherwise {
      // maybe the store inst has been written to sbuffer already
      io.forward(i).dataInvalidSqIdx := RegNext(io.forward(i).uop.sqIdx)
    }
  }

  /**
    * Memory mapped IO / other uncached operations
    *
    * States:
    * (1) writeback from store units: mark as pending
    * (2) when they reach ROB's head, they can be sent to uncache channel
    * (3) response from uncache channel: mark as datavalidmask.wen
    * (4) writeback to ROB (and other units): mark as writebacked
    * (5) ROB commits the instruction: same as normal instructions
    */
  //(2) when they reach ROB's head, they can be sent to uncache channel
  // TODO: CAN NOT deal with vector mmio now!
  val s_idle :: s_req :: s_resp :: s_wb :: s_wait :: Nil = Enum(5)
  val uncacheState = RegInit(s_idle)
  switch(uncacheState) {
    is(s_idle) {
      when(RegNext(io.rob.pendingst && pending(deqPtr) && allocated(deqPtr) && datavalid(deqPtr) && addrvalid(deqPtr))) {
        uncacheState := s_req
      }
    }
    is(s_req) {
      when (io.uncache.req.fire) {
        when (io.uncacheOutstanding) {
          uncacheState := s_wb
        } .otherwise {
          uncacheState := s_resp
        }
      }
    }
    is(s_resp) {
      when(io.uncache.resp.fire) {
        uncacheState := s_wb
      }
    }
    is(s_wb) {
      when (io.mmioStout.fire || io.vecmmioStout.fire) {
        uncacheState := s_wait
      }
    }
    is(s_wait) {
      // An MMIO store can always move cmtPtrExt as it must be at ROB head
      when(scommit > 0.U) {
        uncacheState := s_idle // ready for next mmio
      }
    }
  }
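
  // Illustrative state flow: when io.uncacheOutstanding is set, the response
  // is not waited for:
  //   s_idle -> s_req -> s_wb -> s_wait -> s_idle
  // otherwise the full handshake is:
  //   s_idle -> s_req -> s_resp -> s_wb -> s_wait -> s_idle
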
  io.uncache.req.valid := uncacheState === s_req

  io.uncache.req.bits := DontCare
  io.uncache.req.bits.cmd  := MemoryOpConstants.M_XWR
  io.uncache.req.bits.addr := paddrModule.io.rdata(0) // data(deqPtr) -> rdata(0)
  io.uncache.req.bits.data := shiftDataToLow(paddrModule.io.rdata(0), dataModule.io.rdata(0).data)
  io.uncache.req.bits.mask := shiftMaskToLow(paddrModule.io.rdata(0), dataModule.io.rdata(0).mask)

  // CBO op type check can be delayed for 1 cycle,
  // as uncache op will not start in s_idle
  val cbo_mmio_addr = paddrModule.io.rdata(0) >> 2 << 2 // clear lowest 2 bits for op
  val cbo_mmio_op = 0.U //TODO
  val cbo_mmio_data = cbo_mmio_addr | cbo_mmio_op
  when(RegNext(LSUOpType.isCbo(uop(deqPtr).fuOpType))){
    io.uncache.req.bits.addr := DontCare // TODO
    io.uncache.req.bits.data := paddrModule.io.rdata(0)
    io.uncache.req.bits.mask := DontCare // TODO
  }

  io.uncache.req.bits.atomic := atomic(RegNext(rdataPtrExtNext(0)).value)

  when(io.uncache.req.fire){
    // mmio store should not be committed until uncache req is sent
    pending(deqPtr) := false.B

    XSDebug(
      p"uncache req: pc ${Hexadecimal(uop(deqPtr).pc)} " +
      p"addr ${Hexadecimal(io.uncache.req.bits.addr)} " +
      p"data ${Hexadecimal(io.uncache.req.bits.data)} " +
      p"op ${Hexadecimal(io.uncache.req.bits.cmd)} " +
      p"mask ${Hexadecimal(io.uncache.req.bits.mask)}\n"
    )
  }

  // (3) response from uncache channel: mark as datavalid
  io.uncache.resp.ready := true.B

  // (4) scalar store: writeback to ROB (and other units): mark as writebacked
  io.mmioStout.valid := uncacheState === s_wb && !isVec(deqPtr)
  io.mmioStout.bits.uop := uop(deqPtr)
  io.mmioStout.bits.uop.sqIdx := deqPtrExt(0)
  io.mmioStout.bits.data := shiftDataToLow(paddrModule.io.rdata(0), dataModule.io.rdata(0).data) // dataModule.io.rdata.read(deqPtr)
  io.mmioStout.bits.debug.isMMIO := true.B
  io.mmioStout.bits.debug.paddr := DontCare
  io.mmioStout.bits.debug.isPerfCnt := false.B
  io.mmioStout.bits.debug.vaddr := DontCare
  // Remove MMIO inst from store queue after MMIO request has been sent
  // That inst will be traced by uncache state machine
  when (io.mmioStout.fire) {
    allocated(deqPtr) := false.B
  }

  // (4) or vector store:
  // TODO: implement it!
  io.vecmmioStout := DontCare
  io.vecmmioStout.valid := uncacheState === s_wb && isVec(deqPtr)
  io.vecmmioStout.bits.uop := uop(deqPtr)
  io.vecmmioStout.bits.uop.sqIdx := deqPtrExt(0)
  io.vecmmioStout.bits.data := shiftDataToLow(paddrModule.io.rdata(0), dataModule.io.rdata(0).data) // dataModule.io.rdata.read(deqPtr)
  io.vecmmioStout.bits.debug.isMMIO := true.B
  io.vecmmioStout.bits.debug.paddr := DontCare
  io.vecmmioStout.bits.debug.isPerfCnt := false.B
  io.vecmmioStout.bits.debug.vaddr := DontCare
  // Remove MMIO inst from store queue after MMIO request has been sent
  // That inst will be traced by uncache state machine
  when (io.vecmmioStout.fire) {
    allocated(deqPtr) := false.B
  }

  /**
    * ROB commits store instructions (mark them as committed)
    *
    * (1) When store commits, mark it as committed.
    * (2) They will not be cancelled and can be sent to lower level.
    */
  XSError(uncacheState =/= s_idle && uncacheState =/= s_wait && commitCount > 0.U,
    "should not commit instruction when MMIO has not been finished\n")

  val scalarcommitVec = WireInit(VecInit(Seq.fill(CommitWidth)(false.B)))
  val veccommitVec = WireInit(VecInit(Seq.fill(CommitWidth)(false.B)))
  // TODO: Deal with vector store mmio
  for (i <- 0 until CommitWidth) {
    val veccount = PopCount(veccommitVec.take(i))
    when (allocated(cmtPtrExt(i).value) && isVec(cmtPtrExt(i).value) && isNotAfter(uop(cmtPtrExt(i).value).robIdx, RegNext(io.rob.pendingPtr)) && vecMbCommit(cmtPtrExt(i).value)) {
      if (i == 0){
        // TODO: fixme for vector mmio
        when ((uncacheState === s_idle) || (uncacheState === s_wait && scommit > 0.U)){
          committed(cmtPtrExt(0).value) := true.B
          veccommitVec(i) := true.B
        }
      } else {
        committed(cmtPtrExt(i).value) := true.B
        veccommitVec(i) := veccommitVec(i - 1) || scalarcommitVec(i - 1)
      }
    } .elsewhen (scalarCommitCount > i.U - veccount) {
      if (i == 0){
        when ((uncacheState === s_idle) || (uncacheState === s_wait && scommit > 0.U)){
          committed(cmtPtrExt(0).value) := true.B
          scalarcommitVec(i) := true.B
        }
      } else {
        committed(cmtPtrExt(i).value) := true.B
        scalarcommitVec(i) := veccommitVec(i - 1) || scalarcommitVec(i - 1)
      }
    }
  }

  scalarCommitted := PopCount(scalarcommitVec)
  vecCommitted := PopCount(veccommitVec)
  commitCount := scalarCommitted + vecCommitted

  cmtPtrExt := cmtPtrExt.map(_ + commitCount)
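
  // Example (illustrative, CommitWidth = 2): a vector store at cmtPtrExt(0)
  // (merge-buffer committed) and a scalar store at cmtPtrExt(1) can commit in
  // the same cycle:
  //   veccommitVec = (1, 0), scalarcommitVec = (0, 1) -> commitCount = 2
  // and cmtPtrExt advances by 2.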

  // committed stores will not be cancelled and can be sent to lower level.
  // remove retired insts from sq, add retired store to sbuffer

  // Read data from data module
  // As the store queue grows larger, the time needed to read data from the
  // data module keeps increasing. So data read is given a whole cycle.
  val mmioStall = mmio(rdataPtrExt(0).value)
  for (i <- 0 until EnsbufferWidth) {
    val ptr = rdataPtrExt(i).value
    dataBuffer.io.enq(i).valid := allocated(ptr) && committed(ptr) && (!isVec(ptr) || vecMbCommit(ptr)) && !mmioStall
    // Note that store data/addr should both be valid after store's commit
    assert(!dataBuffer.io.enq(i).valid || allvalid(ptr) || (allocated(ptr) && vecMbCommit(ptr)))
    dataBuffer.io.enq(i).bits.addr     := paddrModule.io.rdata(i)
    dataBuffer.io.enq(i).bits.vaddr    := vaddrModule.io.rdata(i)
    dataBuffer.io.enq(i).bits.data     := dataModule.io.rdata(i).data
    dataBuffer.io.enq(i).bits.mask     := dataModule.io.rdata(i).mask
    dataBuffer.io.enq(i).bits.wline    := paddrModule.io.rlineflag(i)
    dataBuffer.io.enq(i).bits.sqPtr    := rdataPtrExt(i)
    dataBuffer.io.enq(i).bits.prefetch := prefetch(ptr)
    dataBuffer.io.enq(i).bits.vecValid := !isVec(ptr) || vecDataValid(ptr) // scalar is always valid
  }

  // Send data stored in sbufferReqBitsReg to sbuffer
  for (i <- 0 until EnsbufferWidth) {
    io.sbuffer(i).valid := dataBuffer.io.deq(i).valid
    dataBuffer.io.deq(i).ready := io.sbuffer(i).ready
    // Write line request should have an all-ones mask
    assert(!(io.sbuffer(i).valid && io.sbuffer(i).bits.wline && io.sbuffer(i).bits.vecValid && !io.sbuffer(i).bits.mask.andR))
    io.sbuffer(i).bits := DontCare
    io.sbuffer(i).bits.cmd   := MemoryOpConstants.M_XWR
    io.sbuffer(i).bits.addr  := dataBuffer.io.deq(i).bits.addr
    io.sbuffer(i).bits.vaddr := dataBuffer.io.deq(i).bits.vaddr
    io.sbuffer(i).bits.data  := dataBuffer.io.deq(i).bits.data
    io.sbuffer(i).bits.mask  := dataBuffer.io.deq(i).bits.mask
    io.sbuffer(i).bits.wline := dataBuffer.io.deq(i).bits.wline
    io.sbuffer(i).bits.prefetch := dataBuffer.io.deq(i).bits.prefetch
    io.sbuffer(i).bits.vecValid := dataBuffer.io.deq(i).bits.vecValid
    // io.sbuffer(i).fire is RegNexted, as sbuffer data write takes 2 cycles.
    // Before data write finishes, sbuffer is unable to provide store to load
    // forward data. As a workaround, deqPtrExt and allocated flag update
    // is delayed so that load can get the right data from store queue.
    val ptr = dataBuffer.io.deq(i).bits.sqPtr.value
    when (RegNext(io.sbuffer(i).fire)) {
      allocated(RegEnable(ptr, io.sbuffer(i).fire)) := false.B
      XSDebug(s"sbuffer $i fire: ptr %d\n", ptr)
    }
  }


  // Initialize when difftest is not enabled.
  for (i <- 0 until EnsbufferWidth) {
    io.sbufferVecDifftestInfo(i) := DontCare
  }
  // Consistent with the logic above.
  // Only the signals required by vector store difftest are separated from the RTL code.
  if (env.EnableDifftest) {
    for (i <- 0 until EnsbufferWidth) {
      val ptr = rdataPtrExt(i).value
      difftestBuffer.get.io.enq(i).valid := allocated(ptr) && committed(ptr) && (!isVec(ptr) || vecMbCommit(ptr)) && !mmioStall
      difftestBuffer.get.io.enq(i).bits := uop(ptr)
    }
    for (i <- 0 until EnsbufferWidth) {
      io.sbufferVecDifftestInfo(i).valid := difftestBuffer.get.io.deq(i).valid
      difftestBuffer.get.io.deq(i).ready := io.sbufferVecDifftestInfo(i).ready

      io.sbufferVecDifftestInfo(i).bits := difftestBuffer.get.io.deq(i).bits
    }
  }

  (1 until EnsbufferWidth).foreach(i => when(io.sbuffer(i).fire) { assert(io.sbuffer(i - 1).fire) })
  if (coreParams.dcacheParametersOpt.isEmpty) {
    for (i <- 0 until EnsbufferWidth) {
      val ptr = deqPtrExt(i).value
      val ram = DifftestMem(64L * 1024 * 1024 * 1024, 8)
      val wen = allocated(ptr) && committed(ptr) && !mmio(ptr)
      val waddr = ((paddrModule.io.rdata(i) - "h80000000".U) >> 3).asUInt
      val wdata = Mux(paddrModule.io.rdata(i)(3), dataModule.io.rdata(i).data(127, 64), dataModule.io.rdata(i).data(63, 0))
      val wmask = Mux(paddrModule.io.rdata(i)(3), dataModule.io.rdata(i).mask(15, 8), dataModule.io.rdata(i).mask(7, 0))
      when (wen) {
        ram.write(waddr, wdata.asTypeOf(Vec(8, UInt(8.W))), wmask.asBools)
      }
    }
  }

  // Read vaddr for mem exception
  io.exceptionAddr.vaddr  := exceptionBuffer.io.exceptionAddr.vaddr
  io.exceptionAddr.gpaddr := exceptionBuffer.io.exceptionAddr.gpaddr
  io.exceptionAddr.vstart := exceptionBuffer.io.exceptionAddr.vstart
  io.exceptionAddr.vl     := exceptionBuffer.io.exceptionAddr.vl

  // vector commit or replay, from vector feedback
  val vecCommittmp = Wire(Vec(StoreQueueSize, Vec(VecStorePipelineWidth, Bool())))
  val vecCommit = Wire(Vec(StoreQueueSize, Bool()))
  for (i <- 0 until StoreQueueSize) {
    val fbk = io.vecFeedback
    for (j <- 0 until VecStorePipelineWidth) {
      vecCommittmp(i)(j) := fbk(j).valid && fbk(j).bits.isCommit && uop(i).robIdx === fbk(j).bits.robidx && uop(i).uopIdx === fbk(j).bits.uopidx
    }
    vecCommit(i) := vecCommittmp(i).reduce(_ || _)

    when (vecCommit(i)) {
      vecMbCommit(i) := true.B
    }
  }

  // misprediction recovery / exception redirect
  // invalidate sq term using robIdx
  val needCancel = Wire(Vec(StoreQueueSize, Bool()))
  for (i <- 0 until StoreQueueSize) {
    needCancel(i) := uop(i).robIdx.needFlush(io.brqRedirect) && allocated(i) && !committed(i)
    when (needCancel(i)) {
      allocated(i) := false.B
    }
  }

  /**
    * update pointers
    */
  val enqCancelValid = canEnqueue.zip(io.enq.req).map{case (v, x) =>
    v && x.bits.robIdx.needFlush(io.brqRedirect)
  }
  val enqCancelNum = enqCancelValid.zip(io.enq.req).map{case (v, req) =>
    Mux(v, req.bits.numLsElem, 0.U)
  }
  val lastEnqCancel = RegNext(enqCancelNum.reduce(_ + _)) // 1 cycle after redirect

  val lastCycleCancelCount = PopCount(RegNext(needCancel)) // 1 cycle after redirect
  val lastCycleRedirect = RegNext(io.brqRedirect.valid) // 1 cycle after redirect
  val enqNumber = validVStoreFlow.reduce(_ + _)

  val lastlastCycleRedirect = RegNext(lastCycleRedirect) // 2 cycles after redirect
  val redirectCancelCount = RegEnable(lastCycleCancelCount + lastEnqCancel, lastCycleRedirect) // 2 cycles after redirect

  when (lastlastCycleRedirect) {
    // we recover the pointers 2 cycles after redirect for better timing
    enqPtrExt := VecInit(enqPtrExt.map(_ - redirectCancelCount))
  }.otherwise {
    // lastCycleRedirect or normal case
    // when lastCycleRedirect, enqNumber === 0.U, so enqPtrExt will not change
    enqPtrExt := VecInit(enqPtrExt.map(_ + enqNumber))
  }
  assert(!(lastCycleRedirect && enqNumber =/= 0.U))
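
  // Recovery timing sketch (illustrative):
  //   T0: io.brqRedirect.valid
  //   T1: flushed entries counted (lastCycleCancelCount, lastEnqCancel)
  //   T2: enqPtrExt rolled back by redirectCancelCount; io.sqCancelCnt reported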

  deqPtrExt := deqPtrExtNext
  rdataPtrExt := rdataPtrExtNext

  // val dequeueCount = Mux(io.sbuffer(1).fire, 2.U, Mux(io.sbuffer(0).fire || io.mmioStout.fire, 1.U, 0.U))

  // If redirect at T0, sqCancelCnt is at T2
  io.sqCancelCnt := redirectCancelCount
  val ForceWriteUpper = Wire(UInt(log2Up(StoreQueueSize + 1).W))
  ForceWriteUpper := Constantin.createRecord(s"ForceWriteUpper_${p(XSCoreParamsKey).HartId}", initValue = 60)
  val ForceWriteLower = Wire(UInt(log2Up(StoreQueueSize + 1).W))
  ForceWriteLower := Constantin.createRecord(s"ForceWriteLower_${p(XSCoreParamsKey).HartId}", initValue = 55)

  val valid_cnt = PopCount(allocated)
  io.force_write := RegNext(Mux(valid_cnt >= ForceWriteUpper, true.B, valid_cnt >= ForceWriteLower && io.force_write), init = false.B)
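
  // Hysteresis sketch (illustrative, with the default 60/55 thresholds):
  // force_write asserts once >= 60 entries are allocated and stays asserted
  // until occupancy drops below 55.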

  // io.sqEmpty will be used by sbuffer
  // We delay it for 1 cycle for better timing
  // When sbuffer needs to check if it is empty, the pipeline is blocked, which means
  // delaying io.sqEmpty for 1 cycle will also guarantee that sq is empty in that cycle
  io.sqEmpty := RegNext(
    enqPtrExt(0).value === deqPtrExt(0).value &&
    enqPtrExt(0).flag === deqPtrExt(0).flag
  )
  // perf counter
  QueuePerf(StoreQueueSize, validCount, !allowEnqueue)
  val vecValidVec = WireInit(VecInit((0 until StoreQueueSize).map(i => allocated(i) && isVec(i))))
  QueuePerf(StoreQueueSize, PopCount(vecValidVec), !allowEnqueue)
  io.sqFull := !allowEnqueue
  XSPerfAccumulate("mmioCycle", uncacheState =/= s_idle) // sq is busy dealing with uncache req
  XSPerfAccumulate("mmioCnt", io.uncache.req.fire)
  XSPerfAccumulate("mmio_wb_success", io.mmioStout.fire || io.vecmmioStout.fire)
  XSPerfAccumulate("mmio_wb_blocked", (io.mmioStout.valid && !io.mmioStout.ready) || (io.vecmmioStout.valid && !io.vecmmioStout.ready))
  XSPerfAccumulate("validEntryCnt", distanceBetween(enqPtrExt(0), deqPtrExt(0)))
  XSPerfAccumulate("cmtEntryCnt", distanceBetween(cmtPtrExt(0), deqPtrExt(0)))
  XSPerfAccumulate("nCmtEntryCnt", distanceBetween(enqPtrExt(0), cmtPtrExt(0)))

  val perfValidCount = distanceBetween(enqPtrExt(0), deqPtrExt(0))
  val perfEvents = Seq(
    ("mmioCycle      ", uncacheState =/= s_idle),
    ("mmioCnt        ", io.uncache.req.fire),
    ("mmio_wb_success", io.mmioStout.fire || io.vecmmioStout.fire),
    ("mmio_wb_blocked", (io.mmioStout.valid && !io.mmioStout.ready) || (io.vecmmioStout.valid && !io.vecmmioStout.ready)),
    ("stq_1_4_valid  ", (perfValidCount < (StoreQueueSize.U/4.U))),
    ("stq_2_4_valid  ", (perfValidCount > (StoreQueueSize.U/4.U)) & (perfValidCount <= (StoreQueueSize.U/2.U))),
    ("stq_3_4_valid  ", (perfValidCount > (StoreQueueSize.U/2.U)) & (perfValidCount <= (StoreQueueSize.U*3.U/4.U))),
    ("stq_4_4_valid  ", (perfValidCount > (StoreQueueSize.U*3.U/4.U))),
  )
  generatePerfEvent()

  // debug info
  XSDebug("enqPtrExt %d:%d deqPtrExt %d:%d\n", enqPtrExt(0).flag, enqPtr, deqPtrExt(0).flag, deqPtr)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when(flag) {
      XSDebug(false, true.B, name)
    }.otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  for (i <- 0 until StoreQueueSize) {
    XSDebug(s"$i: pc %x va %x pa %x data %x ",
      uop(i).pc,
      debug_vaddr(i),
      debug_paddr(i),
      debug_data(i)
    )
    PrintFlag(allocated(i), "a")
    PrintFlag(allocated(i) && addrvalid(i), "v")
    PrintFlag(allocated(i) && datavalid(i), "d")
    PrintFlag(allocated(i) && committed(i), "c")
    PrintFlag(allocated(i) && pending(i), "p")
    PrintFlag(allocated(i) && mmio(i), "m")
    XSDebug(false, true.B, "\n")
  }

}