/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import utility._
import xiangshan._
import xiangshan.cache._
import xiangshan.cache.mmu.{TlbRequestIO, TlbHintIO}
import xiangshan.backend.rob.RobLsqIO

class ExceptionAddrIO(implicit p: Parameters) extends XSBundle {
  val isStore = Input(Bool())
  val vaddr = Output(UInt(VAddrBits.W))
}

class FwdEntry extends Bundle {
  val validFast = Bool() // validFast is generated in the same cycle as the query
  val valid = Bool() // valid is generated 1 cycle after the query request
  val data = UInt(8.W) // data is generated 1 cycle after the query request
}

// in-flight miss block requests
class InflightBlockInfo(implicit p: Parameters) extends XSBundle {
  val block_addr = UInt(PAddrBits.W)
  val valid = Bool()
}

class LsqEnqIO(implicit p: Parameters) extends XSBundle {
  val canAccept = Output(Bool())
  val needAlloc = Vec(exuParameters.LsExuCnt, Input(UInt(2.W)))
  val req       = Vec(exuParameters.LsExuCnt, Flipped(ValidIO(new MicroOp)))
  val resp      = Vec(exuParameters.LsExuCnt, Output(new LSIdx))
}
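
// For each slot, needAlloc is a 2-bit request: bit 0 asks for a load queue entry,
// bit 1 for a store queue entry (see the enq wiring in LsqWrapper below). A minimal
// sketch of how a dispatch stage might drive this interface, assuming hypothetical
// `uop(i).isLoad`/`uop(i).isStore` flags and a `dispatchValid` vector:
//
//   lsq.enq.needAlloc(i) := Cat(uop(i).isStore, uop(i).isLoad)
//   lsq.enq.req(i).valid := dispatchValid(i) && lsq.enq.canAccept
//   lsq.enq.req(i).bits  := uop(i)
//
// resp(i) then returns the allocated lqIdx/sqIdx for that slot.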

// Load / Store Queue Wrapper for the XiangShan out-of-order LSU
class LsqWrapper(implicit p: Parameters) extends XSModule with HasDCacheParameters with HasPerfEvents {
  val io = IO(new Bundle() {
    val hartId = Input(UInt(8.W))
    val brqRedirect = Flipped(ValidIO(new Redirect))
    val enq = new LsqEnqIO
    val ldu = new Bundle() {
      val stld_nuke_query = Vec(LoadPipelineWidth, Flipped(new LoadNukeQueryIO)) // from load_s2
      val ldld_nuke_query = Vec(LoadPipelineWidth, Flipped(new LoadNukeQueryIO)) // from load_s2
      val ldin = Vec(LoadPipelineWidth, Flipped(Decoupled(new LqWriteBundle))) // from load_s3
    }
    val sta = new Bundle() {
      val storeMaskIn = Vec(StorePipelineWidth, Flipped(Valid(new StoreMaskBundle))) // from store_s0: store mask, sent to sq from rs
      val storeAddrIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle))) // from store_s1
      val storeAddrInRe = Vec(StorePipelineWidth, Input(new LsPipelineBundle())) // from store_s2
    }
    val std = new Bundle() {
      val storeDataIn = Vec(StorePipelineWidth, Flipped(Valid(new ExuOutput))) // from store_s0: store data, sent to sq from rs
    }
    val ldout = Vec(LoadPipelineWidth, DecoupledIO(new ExuOutput))
    val ld_raw_data = Vec(LoadPipelineWidth, Output(new LoadDataFromLQBundle))
    val replay = Vec(LoadPipelineWidth, Decoupled(new LsPipelineBundle))
    val sbuffer = Vec(EnsbufferWidth, Decoupled(new DCacheWordReqWithVaddrAndPfFlag))
    val forward = Vec(LoadPipelineWidth, Flipped(new PipeLoadForwardQueryIO))
    val rob = Flipped(new RobLsqIO)
    val nuke_rollback = Output(Valid(new Redirect))
    val nack_rollback = Output(Valid(new Redirect))
    val release = Flipped(Valid(new Release))
    val refill = Flipped(Valid(new Refill))
    val tl_d_channel  = Input(new DcacheToLduForwardIO)
    val uncacheOutstanding = Input(Bool())
    val uncache = new UncacheWordIO
    val mmioStout = DecoupledIO(new ExuOutput) // writeback for uncached stores
    val sqEmpty = Output(Bool())
    val lq_rep_full = Output(Bool())
    val sqFull = Output(Bool())
    val lqFull = Output(Bool())
    val sqCancelCnt = Output(UInt(log2Up(StoreQueueSize+1).W))
    val lqCancelCnt = Output(UInt(log2Up(VirtualLoadQueueSize+1).W))
    val lqDeq = Output(UInt(log2Up(CommitWidth + 1).W))
    val sqDeq = Output(UInt(log2Ceil(EnsbufferWidth + 1).W))
    val lqCanAccept = Output(Bool())
    val sqCanAccept = Output(Bool())
    val exceptionAddr = new ExceptionAddrIO
    val issuePtrExt = Output(new SqPtr)
    val l2_hint = Input(Valid(new L2ToL1Hint()))
    val tlb_hint = Flipped(new TlbHintIO)
    val force_write = Output(Bool())
    val lqEmpty = Output(Bool())
    val debugTopDown = new LoadQueueTopDownIO
  })

  val loadQueue = Module(new LoadQueue)
  val storeQueue = Module(new StoreQueue)

  storeQueue.io.hartId := io.hartId
  storeQueue.io.uncacheOutstanding := io.uncacheOutstanding

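  // tlbReplayDelayCycleCtrl appears to hold per-cause TLB replay delay values (the
  // exact meaning of each entry is defined by LoadQueue). dontTouch keeps the wire
  // from being optimized away, presumably so it can be observed or forced in simulation.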
  dontTouch(loadQueue.io.tlbReplayDelayCycleCtrl)
  val tlbReplayDelayCycleCtrl = WireInit(VecInit(Seq(14.U(ReSelectLen.W), 0.U(ReSelectLen.W), 125.U(ReSelectLen.W), 0.U(ReSelectLen.W))))
  loadQueue.io.tlbReplayDelayCycleCtrl := tlbReplayDelayCycleCtrl


  // io.enq logic
  // LSQ: send out canAccept only when both the load queue and the store queue are ready
  // Dispatch: send instructions to LSQ only when they are ready
  io.enq.canAccept := loadQueue.io.enq.canAccept && storeQueue.io.enq.canAccept
  io.lqCanAccept := loadQueue.io.enq.canAccept
  io.sqCanAccept := storeQueue.io.enq.canAccept
  loadQueue.io.enq.sqCanAccept := storeQueue.io.enq.canAccept
  storeQueue.io.enq.lqCanAccept := loadQueue.io.enq.canAccept
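  // Each uop is offered to both queues; the index allocated by one queue is
  // cross-filled into the other queue's entry, so every load carries its sqIdx
  // and every store carries its lqIdx.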
  for (i <- io.enq.req.indices) {
    loadQueue.io.enq.needAlloc(i)      := io.enq.needAlloc(i)(0)
    loadQueue.io.enq.req(i).valid      := io.enq.needAlloc(i)(0) && io.enq.req(i).valid
    loadQueue.io.enq.req(i).bits       := io.enq.req(i).bits
    loadQueue.io.enq.req(i).bits.sqIdx := storeQueue.io.enq.resp(i)

    storeQueue.io.enq.needAlloc(i)      := io.enq.needAlloc(i)(1)
    storeQueue.io.enq.req(i).valid      := io.enq.needAlloc(i)(1) && io.enq.req(i).valid
    storeQueue.io.enq.req(i).bits       := io.enq.req(i).bits
    storeQueue.io.enq.req(i).bits.lqIdx := loadQueue.io.enq.resp(i)

    io.enq.resp(i).lqIdx := loadQueue.io.enq.resp(i)
    io.enq.resp(i).sqIdx := storeQueue.io.enq.resp(i)
  }

  // store queue wiring
  storeQueue.io.brqRedirect <> io.brqRedirect
  storeQueue.io.storeAddrIn <> io.sta.storeAddrIn // from store_s1
  storeQueue.io.storeAddrInRe <> io.sta.storeAddrInRe // from store_s2
  storeQueue.io.storeDataIn <> io.std.storeDataIn // from store_s0
  storeQueue.io.storeMaskIn <> io.sta.storeMaskIn // from store_s0
  storeQueue.io.sbuffer     <> io.sbuffer
  storeQueue.io.mmioStout   <> io.mmioStout
  storeQueue.io.rob         <> io.rob
  storeQueue.io.exceptionAddr.isStore := DontCare
  storeQueue.io.sqCancelCnt <> io.sqCancelCnt
  storeQueue.io.sqDeq       <> io.sqDeq
  storeQueue.io.sqEmpty     <> io.sqEmpty
  storeQueue.io.sqFull      <> io.sqFull
  storeQueue.io.forward     <> io.forward // overlap forwardMask & forwardData, DO NOT CHANGE SEQUENCE
  storeQueue.io.force_write <> io.force_write

  /* <------- DANGEROUS: Don't change sequence here! -------> */

  // load queue wiring
  loadQueue.io.redirect            <> io.brqRedirect
  loadQueue.io.ldu                 <> io.ldu
  loadQueue.io.ldout               <> io.ldout
  loadQueue.io.ld_raw_data         <> io.ld_raw_data
  loadQueue.io.rob                 <> io.rob
  loadQueue.io.nuke_rollback       <> io.nuke_rollback
  loadQueue.io.nack_rollback       <> io.nack_rollback
  loadQueue.io.replay              <> io.replay
  loadQueue.io.refill              <> io.refill
  loadQueue.io.tl_d_channel        <> io.tl_d_channel
  loadQueue.io.release             <> io.release
  loadQueue.io.exceptionAddr.isStore := DontCare
  loadQueue.io.lqCancelCnt         <> io.lqCancelCnt
  loadQueue.io.sq.stAddrReadySqPtr <> storeQueue.io.stAddrReadySqPtr
  loadQueue.io.sq.stAddrReadyVec   <> storeQueue.io.stAddrReadyVec
  loadQueue.io.sq.stDataReadySqPtr <> storeQueue.io.stDataReadySqPtr
  loadQueue.io.sq.stDataReadyVec   <> storeQueue.io.stDataReadyVec
  loadQueue.io.sq.stIssuePtr       <> storeQueue.io.stIssuePtr
  loadQueue.io.sq.sqEmpty          <> storeQueue.io.sqEmpty
  loadQueue.io.sta.storeAddrIn     <> io.sta.storeAddrIn // store_s1
  loadQueue.io.std.storeDataIn     <> io.std.storeDataIn // store_s0
  loadQueue.io.lqFull              <> io.lqFull
  loadQueue.io.lq_rep_full         <> io.lq_rep_full
  loadQueue.io.lqDeq               <> io.lqDeq
  loadQueue.io.l2_hint             <> io.l2_hint
  loadQueue.io.tlb_hint            <> io.tlb_hint
  loadQueue.io.lqEmpty             <> io.lqEmpty

  // ROB commits for the LSQ are delayed by two cycles, which in turn delays the deqPtr update in lq/sq
  // s0: commit
  // s1:               exception find
  // s2:               exception triggered
  // s3: ptr updated & new address
  // the address is used in the cycle after the exception is triggered
  io.exceptionAddr.vaddr := Mux(RegNext(io.exceptionAddr.isStore), storeQueue.io.exceptionAddr.vaddr, loadQueue.io.exceptionAddr.vaddr)
  io.issuePtrExt := storeQueue.io.stAddrReadySqPtr

  // naive uncache arbiter
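  // States, as implemented below:
  //   s_idle:  no uncache access is being tracked; stores bypass tracking when
  //            io.uncacheOutstanding is set
  //   s_load:  an uncached load is in flight; its response goes to the load queue
  //   s_store: an uncached store is in flight; its response goes to the store queue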
  val s_idle :: s_load :: s_store :: Nil = Enum(3)
  val pendingstate = RegInit(s_idle)

  switch(pendingstate){
    is(s_idle){
      when(io.uncache.req.fire){
        pendingstate := Mux(loadQueue.io.uncache.req.valid, s_load,
                          Mux(io.uncacheOutstanding, s_idle, s_store))
      }
    }
    is(s_load){
      when(io.uncache.resp.fire){
        pendingstate := s_idle
      }
    }
    is(s_store){
      when(io.uncache.resp.fire){
        pendingstate := s_idle
      }
    }
  }
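
  // Request/response routing below relies on Chisel's last-connect semantics:
  // safe defaults are assigned first, then conditionally overridden.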
  loadQueue.io.uncache := DontCare
  storeQueue.io.uncache := DontCare
  loadQueue.io.uncache.req.ready := false.B
  storeQueue.io.uncache.req.ready := false.B
  loadQueue.io.uncache.resp.valid := false.B
  storeQueue.io.uncache.resp.valid := false.B
  when(loadQueue.io.uncache.req.valid){
    io.uncache.req <> loadQueue.io.uncache.req
  }.otherwise{
    io.uncache.req <> storeQueue.io.uncache.req
  }
  when (io.uncacheOutstanding) {
    io.uncache.resp <> loadQueue.io.uncache.resp
  } .otherwise {
    when(pendingstate === s_load){
      io.uncache.resp <> loadQueue.io.uncache.resp
    }.otherwise{
      io.uncache.resp <> storeQueue.io.uncache.resp
    }
  }

  loadQueue.io.debugTopDown <> io.debugTopDown

  assert(!(loadQueue.io.uncache.req.valid && storeQueue.io.uncache.req.valid))
  assert(!(loadQueue.io.uncache.resp.valid && storeQueue.io.uncache.resp.valid))
  when (!io.uncacheOutstanding) {
    assert(!((loadQueue.io.uncache.resp.valid || storeQueue.io.uncache.resp.valid) && pendingstate === s_idle))
  }

  val perfEvents = Seq(loadQueue, storeQueue).flatMap(_.getPerfEvents)
  generatePerfEvent()
}

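// LsqEnqCtrl sits between dispatch and the LSQ: it mirrors the queues' free-entry
// counters and allocation pointers on the dispatch side, so canAccept and the
// allocated lqIdx/sqIdx can be produced without a round trip to the queues.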
class LsqEnqCtrl(implicit p: Parameters) extends XSModule {
  val io = IO(new Bundle {
    val redirect = Flipped(ValidIO(new Redirect))
    // to dispatch
    val enq = new LsqEnqIO
    // from memBlock.io.lqDeq
    val lcommit = Input(UInt(log2Up(CommitWidth + 1).W))
    // from memBlock.io.sqDeq
    val scommit = Input(UInt(log2Ceil(EnsbufferWidth + 1).W))
    // from/to lsq
    val lqCancelCnt = Input(UInt(log2Up(VirtualLoadQueueSize + 1).W))
    val sqCancelCnt = Input(UInt(log2Up(StoreQueueSize + 1).W))
    val enqLsq = Flipped(new LsqEnqIO)
  })

  val lqPtr = RegInit(0.U.asTypeOf(new LqPtr))
  val sqPtr = RegInit(0.U.asTypeOf(new SqPtr))
  val lqCounter = RegInit(VirtualLoadQueueSize.U(log2Up(VirtualLoadQueueSize + 1).W))
  val sqCounter = RegInit(StoreQueueSize.U(log2Up(StoreQueueSize + 1).W))
  val canAccept = RegInit(false.B)

  val loadEnqNumber = PopCount(io.enq.req.zip(io.enq.needAlloc).map(x => x._1.valid && x._2(0)))
  val storeEnqNumber = PopCount(io.enq.req.zip(io.enq.needAlloc).map(x => x._1.valid && x._2(1)))

  // How to update ptr and counter:
  // (1) by default, update according to enq/commit
  // (2) on redirect, once the dispatch queue is empty, update according to the cancel counts from the lsq
  val t1_redirect = RegNext(io.redirect.valid)
  val t2_redirect = RegNext(t1_redirect)
  val t2_update = t2_redirect && !VecInit(io.enq.needAlloc.map(_.orR)).asUInt.orR
  val t3_update = RegNext(t2_update)
  val t3_lqCancelCnt = RegNext(io.lqCancelCnt)
  val t3_sqCancelCnt = RegNext(io.sqCancelCnt)
  when (t3_update) {
    lqPtr := lqPtr - t3_lqCancelCnt
    lqCounter := lqCounter + io.lcommit + t3_lqCancelCnt
    sqPtr := sqPtr - t3_sqCancelCnt
    sqCounter := sqCounter + io.scommit + t3_sqCancelCnt
  }.elsewhen (!io.redirect.valid && io.enq.canAccept) {
    lqPtr := lqPtr + loadEnqNumber
    lqCounter := lqCounter + io.lcommit - loadEnqNumber
    sqPtr := sqPtr + storeEnqNumber
    sqCounter := sqCounter + io.scommit - storeEnqNumber
  }.otherwise {
    lqCounter := lqCounter + io.lcommit
    sqCounter := sqCounter + io.scommit
  }
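
  // Worked example for the default path: with lqCounter = 4 free entries,
  // loadEnqNumber = 2 new loads and io.lcommit = 1 commit this cycle, the next
  // value is 4 + 1 - 2 = 3 free entries, and lqPtr advances by 2.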

  val maxAllocate = Seq(exuParameters.LduCnt, exuParameters.StuCnt).max
  val ldCanAccept = lqCounter >= loadEnqNumber +& maxAllocate.U
  val sqCanAccept = sqCounter >= storeEnqNumber +& maxAllocate.U
  // It is possible that t3_update and enq are true in the same clock cycle.
  // For example, if redirect.valid lasts more than one clock cycle,
  // new instructions may enter after the last redirect while the earlier redirect
  // has not yet been resolved (counters not yet updated with the cancel counts from the LSQ).
  // To solve the issue simply, we block enqueue during t3_update, which is RegNext(t2_update).
  io.enq.canAccept := RegNext(ldCanAccept && sqCanAccept && !t2_update)
  val lqOffset = Wire(Vec(io.enq.resp.length, UInt(log2Up(maxAllocate + 1).W)))
  val sqOffset = Wire(Vec(io.enq.resp.length, UInt(log2Up(maxAllocate + 1).W)))
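  // Each slot's index is the base pointer plus the number of same-type allocations
  // in lower-numbered slots. E.g. with needAlloc = ("b01", "b01", "b10"), lqOffset
  // is (0, 1, 2) and sqOffset is (0, 0, 0): the two loads get lqPtr and lqPtr + 1,
  // and the store gets sqPtr.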
  for ((resp, i) <- io.enq.resp.zipWithIndex) {
    lqOffset(i) := PopCount(io.enq.needAlloc.take(i).map(a => a(0)))
    resp.lqIdx := lqPtr + lqOffset(i)
    sqOffset(i) := PopCount(io.enq.needAlloc.take(i).map(a => a(1)))
    resp.sqIdx := sqPtr + sqOffset(i)
  }

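  // The handoff to the LSQ is registered: requests accepted here are presented to
  // the queues one cycle later, with the allocated indices latched alongside.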
  io.enqLsq.needAlloc := RegNext(io.enq.needAlloc)
  io.enqLsq.req.zip(io.enq.req).zip(io.enq.resp).foreach{ case ((toLsq, enq), resp) =>
    val do_enq = enq.valid && !io.redirect.valid && io.enq.canAccept
    toLsq.valid := RegNext(do_enq)
    toLsq.bits := RegEnable(enq.bits, do_enq)
    toLsq.bits.lqIdx := RegEnable(resp.lqIdx, do_enq)
    toLsq.bits.sqIdx := RegEnable(resp.sqIdx, do_enq)
  }
}