/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import utility._
import xiangshan._
import xiangshan.backend.Bundles.{DynInst, MemExuOutput}
import xiangshan.cache._
import xiangshan.cache.mmu.{TlbRequestIO, TlbHintIO}
import xiangshan.backend._
import xiangshan.backend.rob.RobLsqIO

class ExceptionAddrIO(implicit p: Parameters) extends XSBundle {
  val isStore = Input(Bool())
  val vaddr = Output(UInt(VAddrBits.W))
}

class FwdEntry extends Bundle {
  val validFast = Bool() // validFast is generated in the same cycle as the query
  val valid = Bool() // valid is generated 1 cycle after the query request
  val data = UInt(8.W) // data is generated 1 cycle after the query request
}
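
// FwdEntry carries store-to-load forwarding results at byte granularity (note the
// 8-bit data field); a full forwarding result is a Vec of FwdEntry, one per byte
// lane. A minimal sketch under that assumption (names below are illustrative, not
// from this file):
//
//   val fwd = Wire(Vec(8, new FwdEntry))          // 8 byte lanes for a 64-bit load
//   val fwdData = VecInit(fwd.map(_.data)).asUInt // recombine forwarded bytes
//   val fwdHit  = fwd.map(_.valid).reduce(_ || _) // any byte forwarded?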

// in-flight miss block requests
class InflightBlockInfo(implicit p: Parameters) extends XSBundle {
  val block_addr = UInt(PAddrBits.W)
  val valid = Bool()
}

class LsqEnqIO(implicit p: Parameters) extends MemBlockBundle {
  val canAccept = Output(Bool())
  // needAlloc(i)(0): inst i needs a load queue slot; needAlloc(i)(1): a store queue slot
  val needAlloc = Vec(LSQEnqWidth, Input(UInt(2.W)))
  val req       = Vec(LSQEnqWidth, Flipped(ValidIO(new DynInst)))
  val resp      = Vec(LSQEnqWidth, Output(new LSIdx))
}
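
// A minimal sketch (an assumption for illustration, not code from this repo) of how
// a dispatch stage might drive one LsqEnqIO slot; `isLoad`/`isStore`/`uopValid`/`uop`
// are hypothetical decode signals:
//
//   val enq = Wire(new LsqEnqIO)
//   enq.needAlloc(0) := Cat(isStore, isLoad)        // bit 1: SQ slot, bit 0: LQ slot
//   enq.req(0).valid := uopValid && enq.canAccept   // only dispatch when LSQ can accept
//   enq.req(0).bits  := uop                         // the DynInst being dispatched
//   val lsIdx = enq.resp(0)                         // lqIdx/sqIdx allocated this cycle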

// Load / Store Queue Wrapper for XiangShan Out of Order LSU
class LsqWrapper(implicit p: Parameters) extends XSModule with HasDCacheParameters with HasPerfEvents {
  val io = IO(new Bundle() {
    val hartId = Input(UInt(8.W))
    val brqRedirect = Flipped(ValidIO(new Redirect))
    val enq = new LsqEnqIO
    val ldu = new Bundle() {
      val stld_nuke_query = Vec(LoadPipelineWidth, Flipped(new LoadNukeQueryIO)) // from load_s2
      val ldld_nuke_query = Vec(LoadPipelineWidth, Flipped(new LoadNukeQueryIO)) // from load_s2
      val ldin = Vec(LoadPipelineWidth, Flipped(Decoupled(new LqWriteBundle))) // from load_s3
    }
    val sta = new Bundle() {
      val storeMaskIn = Vec(StorePipelineWidth, Flipped(Valid(new StoreMaskBundle))) // from store_s0, store mask, send to sq from rs
      val storeAddrIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle))) // from store_s1
      val storeAddrInRe = Vec(StorePipelineWidth, Input(new LsPipelineBundle())) // from store_s2
      val vecStoreAddrIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    }
    val std = new Bundle() {
      val storeDataIn = Vec(StorePipelineWidth, Flipped(Valid(new MemExuOutput))) // from store_s0, store data, send to sq from rs
    }
    val ldout = Vec(LoadPipelineWidth, DecoupledIO(new MemExuOutput))
    val ld_raw_data = Vec(LoadPipelineWidth, Output(new LoadDataFromLQBundle))
    val replay = Vec(LoadPipelineWidth, Decoupled(new LsPipelineBundle))
    val sbuffer = Vec(EnsbufferWidth, Decoupled(new DCacheWordReqWithVaddrAndPfFlag))
    val forward = Vec(LoadPipelineWidth, Flipped(new PipeLoadForwardQueryIO))
    val rob = Flipped(new RobLsqIO)
    val nuke_rollback = Output(Valid(new Redirect))
    val nack_rollback = Output(Valid(new Redirect))
    val release = Flipped(Valid(new Release))
    val refill = Flipped(Valid(new Refill))
    val tl_d_channel  = Input(new DcacheToLduForwardIO)
    val uncacheOutstanding = Input(Bool())
    val uncache = new UncacheWordIO
    val mmioStout = DecoupledIO(new MemExuOutput) // writeback uncached store
    val sqEmpty = Output(Bool())
    val lq_rep_full = Output(Bool())
    val sqFull = Output(Bool())
    val lqFull = Output(Bool())
    val sqCancelCnt = Output(UInt(log2Up(StoreQueueSize+1).W))
    val lqCancelCnt = Output(UInt(log2Up(VirtualLoadQueueSize+1).W))
    val lqDeq = Output(UInt(log2Up(CommitWidth + 1).W))
    val sqDeq = Output(UInt(log2Ceil(EnsbufferWidth + 1).W))
    val lqCanAccept = Output(Bool())
    val sqCanAccept = Output(Bool())
    val lqDeqPtr = Output(new LqPtr)
    val sqDeqPtr = Output(new SqPtr)
    val exceptionAddr = new ExceptionAddrIO
    val trigger = Vec(LoadPipelineWidth, new LqTriggerIO)
    val issuePtrExt = Output(new SqPtr)
    val l2_hint = Input(Valid(new L2ToL1Hint()))
    val tlb_hint = Flipped(new TlbHintIO)
    val force_write = Output(Bool())
    val lqEmpty = Output(Bool())

    // vector
    val vecWriteback = Flipped(ValidIO(new MemExuOutput(isVector = true)))
    val vecStoreRetire = Flipped(ValidIO(new SqPtr))
    val vecMMIOReplay = Vec(VecLoadPipelineWidth, DecoupledIO(new LsPipelineBundle()))

    // top-down
    val debugTopDown = new LoadQueueTopDownIO
  })

  val loadQueue = Module(new LoadQueue)
  val storeQueue = Module(new StoreQueue)

  storeQueue.io.hartId := io.hartId
  storeQueue.io.uncacheOutstanding := io.uncacheOutstanding

  dontTouch(loadQueue.io.tlbReplayDelayCycleCtrl)
  // Todo: imm
  val tlbReplayDelayCycleCtrl = WireInit(VecInit(Seq(14.U(ReSelectLen.W), 0.U(ReSelectLen.W), 125.U(ReSelectLen.W), 0.U(ReSelectLen.W))))
  loadQueue.io.tlbReplayDelayCycleCtrl := tlbReplayDelayCycleCtrl

  // io.enq logic
  // LSQ: send out canAccept when both load queue and store queue are ready
  // Dispatch: send instructions to LSQ only when they are ready
  io.enq.canAccept := loadQueue.io.enq.canAccept && storeQueue.io.enq.canAccept
  io.lqCanAccept := loadQueue.io.enq.canAccept
  io.sqCanAccept := storeQueue.io.enq.canAccept
  loadQueue.io.enq.sqCanAccept := storeQueue.io.enq.canAccept
  storeQueue.io.enq.lqCanAccept := loadQueue.io.enq.canAccept
  io.lqDeqPtr := loadQueue.io.lqDeqPtr
  io.sqDeqPtr := storeQueue.io.sqDeqPtr
  for (i <- io.enq.req.indices) {
    loadQueue.io.enq.needAlloc(i)      := io.enq.needAlloc(i)(0)
    loadQueue.io.enq.req(i).valid      := io.enq.needAlloc(i)(0) && io.enq.req(i).valid
    loadQueue.io.enq.req(i).bits       := io.enq.req(i).bits
    loadQueue.io.enq.req(i).bits.sqIdx := storeQueue.io.enq.resp(i)

    storeQueue.io.enq.needAlloc(i)      := io.enq.needAlloc(i)(1)
    storeQueue.io.enq.req(i).valid      := io.enq.needAlloc(i)(1) && io.enq.req(i).valid
    storeQueue.io.enq.req(i).bits       := io.enq.req(i).bits
    storeQueue.io.enq.req(i).bits.lqIdx := loadQueue.io.enq.resp(i)

    io.enq.resp(i).lqIdx := loadQueue.io.enq.resp(i)
    io.enq.resp(i).sqIdx := storeQueue.io.enq.resp(i)
  }
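
  // Cross-wiring above: each enqueued load records the sqIdx allocated for its
  // slot (and each store records the matching lqIdx), enabling later age
  // comparisons between loads and stores. A worked example with illustrative
  // values, assuming 2-wide enqueue:
  //   slot 0: needAlloc = "b01" (load)  -> LQ entry records sqIdx = current SQ tail
  //   slot 1: needAlloc = "b10" (store) -> SQ entry records lqIdx = LQ tail + 1
  //           (slot 0's load already claimed one LQ slot)
  // The actual resp values come from loadQueue/storeQueue allocation logic.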

  // store queue wiring
  storeQueue.io.brqRedirect <> io.brqRedirect
  storeQueue.io.storeAddrIn <> io.sta.storeAddrIn // from store_s1
  storeQueue.io.vecStoreAddrIn <> io.sta.vecStoreAddrIn // from store_s1
  storeQueue.io.storeAddrInRe <> io.sta.storeAddrInRe // from store_s2
  storeQueue.io.storeDataIn <> io.std.storeDataIn // from store_s0
  storeQueue.io.storeMaskIn <> io.sta.storeMaskIn // from store_s0
  storeQueue.io.sbuffer     <> io.sbuffer
  storeQueue.io.mmioStout   <> io.mmioStout
  storeQueue.io.rob         <> io.rob
  storeQueue.io.exceptionAddr.isStore := DontCare
  storeQueue.io.sqCancelCnt <> io.sqCancelCnt
  storeQueue.io.sqDeq       <> io.sqDeq
  storeQueue.io.sqEmpty     <> io.sqEmpty
  storeQueue.io.sqFull      <> io.sqFull
  storeQueue.io.forward     <> io.forward // overlap forwardMask & forwardData, DO NOT CHANGE SEQUENCE
  storeQueue.io.force_write <> io.force_write
  storeQueue.io.vecStoreRetire <> io.vecStoreRetire

  /* <------- DANGEROUS: Don't change sequence here ! -------> */

  // load queue wiring
  loadQueue.io.redirect            <> io.brqRedirect
  loadQueue.io.ldu                 <> io.ldu
  loadQueue.io.ldout               <> io.ldout
  loadQueue.io.ld_raw_data         <> io.ld_raw_data
  loadQueue.io.rob                 <> io.rob
  loadQueue.io.nuke_rollback       <> io.nuke_rollback
  loadQueue.io.nack_rollback       <> io.nack_rollback
  loadQueue.io.replay              <> io.replay
  loadQueue.io.refill              <> io.refill
  loadQueue.io.tl_d_channel        <> io.tl_d_channel
  loadQueue.io.release             <> io.release
  loadQueue.io.trigger             <> io.trigger
  loadQueue.io.exceptionAddr.isStore := DontCare
  loadQueue.io.lqCancelCnt         <> io.lqCancelCnt
  loadQueue.io.sq.stAddrReadySqPtr <> storeQueue.io.stAddrReadySqPtr
  loadQueue.io.sq.stAddrReadyVec   <> storeQueue.io.stAddrReadyVec
  loadQueue.io.sq.stDataReadySqPtr <> storeQueue.io.stDataReadySqPtr
  loadQueue.io.sq.stDataReadyVec   <> storeQueue.io.stDataReadyVec
  loadQueue.io.sq.stIssuePtr       <> storeQueue.io.stIssuePtr
  loadQueue.io.sq.sqEmpty          <> storeQueue.io.sqEmpty
  loadQueue.io.sta.storeAddrIn     <> io.sta.storeAddrIn // from store_s1
  loadQueue.io.sta.vecStoreAddrIn  <> io.sta.vecStoreAddrIn // from store_s1
  loadQueue.io.std.storeDataIn     <> io.std.storeDataIn // from store_s0
  loadQueue.io.lqFull              <> io.lqFull
  loadQueue.io.lq_rep_full         <> io.lq_rep_full
  loadQueue.io.lqDeq               <> io.lqDeq
  loadQueue.io.l2_hint             <> io.l2_hint
  loadQueue.io.tlb_hint            <> io.tlb_hint
  loadQueue.io.lqEmpty             <> io.lqEmpty
  loadQueue.io.vecWriteback        <> io.vecWriteback
  loadQueue.io.vecMMIOReplay       <> io.vecMMIOReplay

  // ROB commits for the LSQ are delayed by two cycles, which in turn delays the
  // deqPtr updates in lq/sq:
  // s0: commit
  // s1: exception found
  // s2: exception triggered
  // s3: ptr updated & new address
  // The address is used in the cycle after the exception is triggered.
  io.exceptionAddr.vaddr := Mux(RegNext(io.exceptionAddr.isStore), storeQueue.io.exceptionAddr.vaddr, loadQueue.io.exceptionAddr.vaddr)
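
  // Because the address is consumed one cycle after isStore is driven, the mux
  // above selects on RegNext(isStore). Intended timing (cycle numbers are
  // illustrative, not from the source):
  //   cycle N:   io.exceptionAddr.isStore driven by the exception logic
  //   cycle N+1: vaddr muxed from storeQueue/loadQueue using the registered isStore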
  io.issuePtrExt := storeQueue.io.stAddrReadySqPtr

  // naive uncache arbiter
  val s_idle :: s_load :: s_store :: Nil = Enum(3)
  val pendingstate = RegInit(s_idle)

  switch (pendingstate) {
    is (s_idle) {
      when (io.uncache.req.fire) {
        pendingstate := Mux(loadQueue.io.uncache.req.valid, s_load,
                          Mux(io.uncacheOutstanding, s_idle, s_store))
      }
    }
    is (s_load) {
      when (io.uncache.resp.fire) {
        pendingstate := s_idle
      }
    }
    is (s_store) {
      when (io.uncache.resp.fire) {
        pendingstate := s_idle
      }
    }
  }
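
  // The arbiter tracks at most one pending uncache access: s_idle moves to s_load
  // (or s_store) when a request fires, and returns to s_idle when the response
  // fires. A store request with uncacheOutstanding set keeps the state in s_idle.
  // An illustrative sequence (assumed, not a simulation trace):
  //   cycle 0: load uncache req fires  -> pendingstate := s_load
  //   cycle k: uncache resp fires      -> pendingstate := s_idle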

  // default: block both queues, then route the winning req/resp below
  loadQueue.io.uncache := DontCare
  storeQueue.io.uncache := DontCare
  loadQueue.io.uncache.req.ready := false.B
  storeQueue.io.uncache.req.ready := false.B
  loadQueue.io.uncache.resp.valid := false.B
  storeQueue.io.uncache.resp.valid := false.B
  when (loadQueue.io.uncache.req.valid) {
    io.uncache.req <> loadQueue.io.uncache.req
  }.otherwise {
    io.uncache.req <> storeQueue.io.uncache.req
  }
  when (io.uncacheOutstanding) {
    io.uncache.resp <> loadQueue.io.uncache.resp
  }.otherwise {
    when (pendingstate === s_load) {
      io.uncache.resp <> loadQueue.io.uncache.resp
    }.otherwise {
      io.uncache.resp <> storeQueue.io.uncache.resp
    }
  }

  loadQueue.io.debugTopDown <> io.debugTopDown

  // the load and store queues must never contend for the uncache port in the same cycle
  assert(!(loadQueue.io.uncache.req.valid && storeQueue.io.uncache.req.valid))
  assert(!(loadQueue.io.uncache.resp.valid && storeQueue.io.uncache.resp.valid))
  when (!io.uncacheOutstanding) {
    assert(!((loadQueue.io.uncache.resp.valid || storeQueue.io.uncache.resp.valid) && pendingstate === s_idle))
  }

  val perfEvents = Seq(loadQueue, storeQueue).flatMap(_.getPerfEvents)
  generatePerfEvent()
}

class LsqEnqCtrl(implicit p: Parameters) extends XSModule {
  val io = IO(new Bundle {
    val redirect = Flipped(ValidIO(new Redirect))
    // to dispatch
    val enq = new LsqEnqIO
    // from `memBlock.io.lqDeq`
    val lcommit = Input(UInt(log2Up(CommitWidth + 1).W))
    // from `memBlock.io.sqDeq`
    val scommit = Input(UInt(log2Ceil(EnsbufferWidth + 1).W))
    // from/to lsq
    val lqCancelCnt = Input(UInt(log2Up(VirtualLoadQueueSize + 1).W))
    val sqCancelCnt = Input(UInt(log2Up(StoreQueueSize + 1).W))
    val enqLsq = Flipped(new LsqEnqIO)
  })

  val lqPtr = RegInit(0.U.asTypeOf(new LqPtr))
  val sqPtr = RegInit(0.U.asTypeOf(new SqPtr))
  val lqCounter = RegInit(VirtualLoadQueueSize.U(log2Up(VirtualLoadQueueSize + 1).W))
  val sqCounter = RegInit(StoreQueueSize.U(log2Up(StoreQueueSize + 1).W))
  val canAccept = RegInit(false.B)

  val loadEnqVec  = io.enq.req.zip(io.enq.needAlloc).map(x => x._1.valid && x._2(0))
  val storeEnqVec = io.enq.req.zip(io.enq.needAlloc).map(x => x._1.valid && x._2(1))
  val loadEnqNumber  = PopCount(loadEnqVec)
  val storeEnqNumber = PopCount(storeEnqVec)
  val isLastUopVec = io.enq.req.map(_.bits.lastUop)
  val lqAllocNumber = PopCount(loadEnqVec.zip(isLastUopVec).map(x => x._1 && x._2))
  val sqAllocNumber = PopCount(storeEnqVec.zip(isLastUopVec).map(x => x._1 && x._2))

  // How ptr and counter are updated:
  // (1) by default, update according to enq/commit
  // (2) on redirect, once the dispatch queue is empty, update according to the
  //     cancel counts reported by the LSQ
  val t1_redirect = RegNext(io.redirect.valid)
  val t2_redirect = RegNext(t1_redirect)
  val t2_update = t2_redirect && !VecInit(io.enq.needAlloc.map(_.orR)).asUInt.orR
  val t3_update = RegNext(t2_update)
  val t3_lqCancelCnt = RegNext(io.lqCancelCnt)
  val t3_sqCancelCnt = RegNext(io.sqCancelCnt)
  when (t3_update) {
    lqPtr := lqPtr - t3_lqCancelCnt
    lqCounter := lqCounter + io.lcommit + t3_lqCancelCnt
    sqPtr := sqPtr - t3_sqCancelCnt
    sqCounter := sqCounter + io.scommit + t3_sqCancelCnt
  }.elsewhen (!io.redirect.valid && io.enq.canAccept) {
    lqPtr := lqPtr + lqAllocNumber
    lqCounter := lqCounter + io.lcommit - lqAllocNumber
    sqPtr := sqPtr + sqAllocNumber
    sqCounter := sqCounter + io.scommit - sqAllocNumber
  }.otherwise {
    lqCounter := lqCounter + io.lcommit
    sqCounter := sqCounter + io.scommit
  }
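
  // Worked example for case (1), with illustrative numbers: if lqCounter holds 10
  // free slots, 2 loads commit this cycle (io.lcommit = 2) and 3 loads are
  // allocated (lqAllocNumber = 3), the counter's next value is 10 + 2 - 3 = 9,
  // and lqPtr advances by 3.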
  val lqMaxAllocate = LSQLdEnqWidth
  val sqMaxAllocate = LSQStEnqWidth
  val maxAllocate = lqMaxAllocate max sqMaxAllocate
  val ldCanAccept = lqCounter >= lqAllocNumber +& lqMaxAllocate.U
  val sqCanAccept = sqCounter >= sqAllocNumber +& sqMaxAllocate.U
  // It is possible that t3_update and enq are true in the same clock cycle.
  // For example, if redirect.valid lasts more than one clock cycle, new
  // instructions may enter after the last redirect while the previous redirect
  // has not yet been resolved (i.e. the cancel counts from the LSQ have not been
  // applied). To solve this simply, we block enqueue when t3_update, which is
  // RegNext(t2_update).
  io.enq.canAccept := RegNext(ldCanAccept && sqCanAccept && !t2_update)
  val lqOffset = Wire(Vec(io.enq.resp.length, UInt(log2Up(maxAllocate + 1).W)))
  val sqOffset = Wire(Vec(io.enq.resp.length, UInt(log2Up(maxAllocate + 1).W)))
  for ((resp, i) <- io.enq.resp.zipWithIndex) {
    lqOffset(i) := PopCount(io.enq.needAlloc.zip(isLastUopVec).take(i).map(x => x._1(0) && x._2))
    resp.lqIdx := lqPtr + lqOffset(i)
    sqOffset(i) := PopCount(io.enq.needAlloc.zip(isLastUopVec).take(i).map(x => x._1(1) && x._2))
    resp.sqIdx := sqPtr + sqOffset(i)
  }
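
  // Worked example (illustrative values): with 3 enq slots whose needAlloc values
  // are "b01", "b10", "b01" and lastUop set for all of them, slot 2 sees one
  // earlier load (slot 0), so lqOffset(2) = 1 and resp(2).lqIdx = lqPtr + 1;
  // it sees one earlier store (slot 1), so sqOffset(2) = 1 and
  // resp(2).sqIdx = sqPtr + 1.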

  // register the accepted requests and forward them to the LSQ one cycle later
  io.enqLsq.needAlloc := RegNext(VecInit(io.enq.needAlloc.zip(io.enq.req).map(x => x._1 & Fill(2, x._2.bits.lastUop))))
  io.enqLsq.req.zip(io.enq.req).zip(io.enq.resp).foreach { case ((toLsq, enq), resp) =>
    val do_enq = enq.valid && !io.redirect.valid && io.enq.canAccept && enq.bits.lastUop
    toLsq.valid := RegNext(do_enq)
    toLsq.bits := RegEnable(enq.bits, do_enq)
    toLsq.bits.lqIdx := RegEnable(resp.lqIdx, do_enq)
    toLsq.bits.sqIdx := RegEnable(resp.sqIdx, do_enq)
  }
}