/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import utility._
import xiangshan._
import xiangshan.backend.Bundles.{DynInst, MemExuOutput}
import xiangshan.backend._
import xiangshan.backend.rob.RobLsqIO
import xiangshan.backend.fu.FuType
import xiangshan.mem.Bundles._
import xiangshan.cache._
import xiangshan.cache.mmu.{TlbRequestIO, TlbHintIO}

class ExceptionAddrIO(implicit p: Parameters) extends XSBundle {
  val isStore = Input(Bool())
  val vaddr = Output(UInt(XLEN.W))
  val vaNeedExt = Output(Bool())
  val isHyper = Output(Bool())
  val vstart = Output(UInt((log2Up(VLEN) + 1).W))
  val vl = Output(UInt((log2Up(VLEN) + 1).W))
  val gpaddr = Output(UInt(XLEN.W))
  val isForVSnonLeafPTE = Output(Bool())
}

class FwdEntry extends Bundle {
  val validFast = Bool() // validFast is generated in the same cycle as the query
  val valid = Bool() // valid is generated 1 cycle after the query request
  val data = UInt(8.W) // data is generated 1 cycle after the query request
}

// in-flight miss block reqs
class InflightBlockInfo(implicit p: Parameters) extends XSBundle {
  val block_addr = UInt(PAddrBits.W)
  val valid = Bool()
}

class LsqEnqIO(implicit p: Parameters) extends MemBlockBundle {
  val canAccept = Output(Bool())
  val needAlloc = Vec(LSQEnqWidth, Input(UInt(2.W)))
  val req       = Vec(LSQEnqWidth, Flipped(ValidIO(new DynInst)))
  val iqAccept  = Input(Vec(LSQEnqWidth, Bool()))
  val resp      = Vec(LSQEnqWidth, Output(new LSIdx))
}

// Load / Store Queue Wrapper for XiangShan Out of Order LSU
class LsqWrapper(implicit p: Parameters) extends XSModule with HasDCacheParameters with HasPerfEvents {
  val io = IO(new Bundle() {
    val hartId = Input(UInt(hartIdLen.W))
    val brqRedirect = Flipped(ValidIO(new Redirect))
    val stvecFeedback = Vec(VecStorePipelineWidth, Flipped(ValidIO(new FeedbackToLsqIO)))
    val ldvecFeedback = Vec(VecLoadPipelineWidth, Flipped(ValidIO(new FeedbackToLsqIO)))
    val enq = new LsqEnqIO
    val ldu = new Bundle() {
      val stld_nuke_query = Vec(LoadPipelineWidth, Flipped(new LoadNukeQueryIO)) // from load_s2
      val ldld_nuke_query = Vec(LoadPipelineWidth, Flipped(new LoadNukeQueryIO)) // from load_s2
      val ldin = Vec(LoadPipelineWidth, Flipped(Decoupled(new LqWriteBundle))) // from load_s3
    }
    val sta = new Bundle() {
      val storeMaskIn = Vec(StorePipelineWidth, Flipped(Valid(new StoreMaskBundle))) // from store_s0: store mask, sent to sq from rs
      val storeAddrIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle))) // from store_s1
      val storeAddrInRe = Vec(StorePipelineWidth, Input(new LsPipelineBundle())) // from store_s2
    }
    val std = new Bundle() {
      val storeDataIn = Vec(StorePipelineWidth, Flipped(Valid(new MemExuOutput(isVector = true)))) // from store_s0: store data, sent to sq from rs
    }
    val ldout = Vec(LoadPipelineWidth, DecoupledIO(new MemExuOutput))
    val ld_raw_data = Vec(LoadPipelineWidth, Output(new LoadDataFromLQBundle))
    val ncOut = Vec(LoadPipelineWidth, DecoupledIO(new LsPipelineBundle))
    val replay = Vec(LoadPipelineWidth, Decoupled(new LsPipelineBundle))
    val sbuffer = Vec(EnsbufferWidth, Decoupled(new DCacheWordReqWithVaddrAndPfFlag))
    val sbufferVecDifftestInfo = Vec(EnsbufferWidth, Decoupled(new DynInst)) // info that the vector store difftest needs
    val forward = Vec(LoadPipelineWidth, Flipped(new PipeLoadForwardQueryIO))
    val rob = Flipped(new RobLsqIO)
    val nuke_rollback = Vec(StorePipelineWidth, Output(Valid(new Redirect)))
    val nack_rollback = Vec(1, Output(Valid(new Redirect))) // uncache
    val release = Flipped(Valid(new Release))
    // val refill = Flipped(Valid(new Refill))
    val tl_d_channel  = Input(new DcacheToLduForwardIO)
    val maControl     = Flipped(new StoreMaBufToSqControlIO)
    val uncacheOutstanding = Input(Bool())
    val uncache = new UncacheWordIO
    val mmioStout = DecoupledIO(new MemExuOutput) // writeback uncached store
    val cboZeroStout = DecoupledIO(new MemExuOutput)
    // TODO: implement vector store
    val vecmmioStout = DecoupledIO(new MemExuOutput(isVector = true)) // vec writeback uncached store
    val sqEmpty = Output(Bool())
    val lq_rep_full = Output(Bool())
    val sqFull = Output(Bool())
    val lqFull = Output(Bool())
    val sqCancelCnt = Output(UInt(log2Up(StoreQueueSize + 1).W))
    val lqCancelCnt = Output(UInt(log2Up(VirtualLoadQueueSize + 1).W))
    val lqDeq = Output(UInt(log2Up(CommitWidth + 1).W))
    val sqDeq = Output(UInt(log2Ceil(EnsbufferWidth + 1).W))
    val lqCanAccept = Output(Bool())
    val sqCanAccept = Output(Bool())
    val lqDeqPtr = Output(new LqPtr)
    val sqDeqPtr = Output(new SqPtr)
    val exceptionAddr = new ExceptionAddrIO
    val loadMisalignFull = Input(Bool())
    val misalignAllowSpec = Input(Bool())
    val issuePtrExt = Output(new SqPtr)
    val l2_hint = Input(Valid(new L2ToL1Hint()))
    val tlb_hint = Flipped(new TlbHintIO)
    val cmoOpReq  = DecoupledIO(new CMOReq)
    val cmoOpResp = Flipped(DecoupledIO(new CMOResp))
    val flushSbuffer = new SbufferFlushBundle
    val force_write = Output(Bool())
    val lqEmpty = Output(Bool())
    val rarValidCount = Output(UInt())

    // top-down
    val debugTopDown = new LoadQueueTopDownIO
    val noUopsIssued = Input(Bool())
  })

  val loadQueue = Module(new LoadQueue)
  val storeQueue = Module(new StoreQueue)

  storeQueue.io.hartId := io.hartId
  storeQueue.io.uncacheOutstanding := io.uncacheOutstanding

  if (backendParams.debugEn) { dontTouch(loadQueue.io.tlbReplayDelayCycleCtrl) }

  // Todo: imm
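  // assumption: the four ReSelectLen-wide constants below are TLB-replay
  // delay cycle counts; their per-slot meaning is defined by LoadQueue,
  // which consumes tlbReplayDelayCycleCtrl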
  val tlbReplayDelayCycleCtrl = WireInit(VecInit(Seq(14.U(ReSelectLen.W), 0.U(ReSelectLen.W), 125.U(ReSelectLen.W), 0.U(ReSelectLen.W))))
  loadQueue.io.tlbReplayDelayCycleCtrl := tlbReplayDelayCycleCtrl

  // io.enq logic
  // LSQ: send out canAccept when both load queue and store queue are ready
  // Dispatch: send instructions to LSQ only when they are ready
  io.enq.canAccept := loadQueue.io.enq.canAccept && storeQueue.io.enq.canAccept
  io.lqCanAccept := loadQueue.io.enq.canAccept
  io.sqCanAccept := storeQueue.io.enq.canAccept
  loadQueue.io.enq.sqCanAccept := storeQueue.io.enq.canAccept
  storeQueue.io.enq.lqCanAccept := loadQueue.io.enq.canAccept
  io.lqDeqPtr := loadQueue.io.lqDeqPtr
  io.sqDeqPtr := storeQueue.io.sqDeqPtr
  io.rarValidCount := loadQueue.io.rarValidCount
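  // needAlloc(i)(0) requests a load queue entry and needAlloc(i)(1) a store
  // queue entry; each queue's allocation response is stitched into the other
  // queue's request bits so every uop carries both its lqIdx and its sqIdx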
  for (i <- io.enq.req.indices) {
    loadQueue.io.enq.needAlloc(i)      := io.enq.needAlloc(i)(0)
    loadQueue.io.enq.req(i).valid      := io.enq.needAlloc(i)(0) && io.enq.req(i).valid
    loadQueue.io.enq.req(i).bits       := io.enq.req(i).bits
    loadQueue.io.enq.req(i).bits.sqIdx := storeQueue.io.enq.resp(i)

    storeQueue.io.enq.needAlloc(i)      := io.enq.needAlloc(i)(1)
    storeQueue.io.enq.req(i).valid      := io.enq.needAlloc(i)(1) && io.enq.req(i).valid
    storeQueue.io.enq.req(i).bits       := io.enq.req(i).bits
    storeQueue.io.enq.req(i).bits.lqIdx := loadQueue.io.enq.resp(i)

    io.enq.resp(i).lqIdx := loadQueue.io.enq.resp(i)
    io.enq.resp(i).sqIdx := storeQueue.io.enq.resp(i)
  }

  // store queue wiring
  storeQueue.io.brqRedirect   <> io.brqRedirect
  storeQueue.io.vecFeedback   <> io.stvecFeedback
  storeQueue.io.storeAddrIn   <> io.sta.storeAddrIn // from store_s1
  storeQueue.io.storeAddrInRe <> io.sta.storeAddrInRe // from store_s2
  storeQueue.io.storeDataIn   <> io.std.storeDataIn // from store_s0
  storeQueue.io.storeMaskIn   <> io.sta.storeMaskIn // from store_s0
  storeQueue.io.sbuffer       <> io.sbuffer
  storeQueue.io.sbufferVecDifftestInfo <> io.sbufferVecDifftestInfo
  storeQueue.io.mmioStout     <> io.mmioStout
  storeQueue.io.cboZeroStout  <> io.cboZeroStout
  storeQueue.io.vecmmioStout  <> io.vecmmioStout
  storeQueue.io.rob           <> io.rob
  storeQueue.io.exceptionAddr.isStore := DontCare
  storeQueue.io.sqCancelCnt   <> io.sqCancelCnt
  storeQueue.io.sqDeq         <> io.sqDeq
  storeQueue.io.sqEmpty       <> io.sqEmpty
  storeQueue.io.sqFull        <> io.sqFull
  storeQueue.io.forward       <> io.forward // overlap forwardMask & forwardData, DO NOT CHANGE SEQUENCE
  storeQueue.io.force_write   <> io.force_write
  storeQueue.io.cmoOpReq      <> io.cmoOpReq
  storeQueue.io.cmoOpResp     <> io.cmoOpResp
  storeQueue.io.flushSbuffer  <> io.flushSbuffer
  storeQueue.io.maControl     <> io.maControl

  /* <------- DANGEROUS: Don't change sequence here ! -------> */

  // load queue wiring
  loadQueue.io.redirect            <> io.brqRedirect
  loadQueue.io.vecFeedback         <> io.ldvecFeedback
  loadQueue.io.ldu                 <> io.ldu
  loadQueue.io.ldout               <> io.ldout
  loadQueue.io.ld_raw_data         <> io.ld_raw_data
  loadQueue.io.ncOut               <> io.ncOut
  loadQueue.io.rob                 <> io.rob
  loadQueue.io.nuke_rollback       <> io.nuke_rollback
  loadQueue.io.nack_rollback       <> io.nack_rollback
  loadQueue.io.replay              <> io.replay
  // loadQueue.io.refill           <> io.refill
  loadQueue.io.tl_d_channel        <> io.tl_d_channel
  loadQueue.io.release             <> io.release
  loadQueue.io.exceptionAddr.isStore := DontCare
  loadQueue.io.loadMisalignFull    := io.loadMisalignFull
  loadQueue.io.misalignAllowSpec   := io.misalignAllowSpec
  loadQueue.io.lqCancelCnt         <> io.lqCancelCnt
  loadQueue.io.sq.stAddrReadySqPtr <> storeQueue.io.stAddrReadySqPtr
  loadQueue.io.sq.stAddrReadyVec   <> storeQueue.io.stAddrReadyVec
  loadQueue.io.sq.stDataReadySqPtr <> storeQueue.io.stDataReadySqPtr
  loadQueue.io.sq.stDataReadyVec   <> storeQueue.io.stDataReadyVec
  loadQueue.io.sq.stIssuePtr       <> storeQueue.io.stIssuePtr
  loadQueue.io.sq.sqEmpty          <> storeQueue.io.sqEmpty
  loadQueue.io.sta.storeAddrIn     <> io.sta.storeAddrIn // store_s1
  loadQueue.io.std.storeDataIn     <> io.std.storeDataIn // store_s0
  loadQueue.io.lqFull              <> io.lqFull
  loadQueue.io.lq_rep_full         <> io.lq_rep_full
  loadQueue.io.lqDeq               <> io.lqDeq
  loadQueue.io.l2_hint             <> io.l2_hint
  loadQueue.io.tlb_hint            <> io.tlb_hint
  loadQueue.io.lqEmpty             <> io.lqEmpty

  // ROB commits for the LSQ are delayed by two cycles, which in turn delays
  // the deqPtr update in lq/sq:
  // s0: commit
  // s1:               exception found
  // s2:               exception triggered
  // s3: ptr updated & new address
  // the address is used in the cycle after the exception is triggered
  val exceptionIsStore = RegNext(io.exceptionAddr.isStore)
  io.exceptionAddr.vaddr     := Mux(exceptionIsStore, storeQueue.io.exceptionAddr.vaddr, loadQueue.io.exceptionAddr.vaddr)
  io.exceptionAddr.vaNeedExt := Mux(exceptionIsStore, storeQueue.io.exceptionAddr.vaNeedExt, loadQueue.io.exceptionAddr.vaNeedExt)
  io.exceptionAddr.isHyper   := Mux(exceptionIsStore, storeQueue.io.exceptionAddr.isHyper, loadQueue.io.exceptionAddr.isHyper)
  io.exceptionAddr.vstart    := Mux(exceptionIsStore, storeQueue.io.exceptionAddr.vstart, loadQueue.io.exceptionAddr.vstart)
  io.exceptionAddr.vl        := Mux(exceptionIsStore, storeQueue.io.exceptionAddr.vl, loadQueue.io.exceptionAddr.vl)
  io.exceptionAddr.gpaddr    := Mux(exceptionIsStore, storeQueue.io.exceptionAddr.gpaddr, loadQueue.io.exceptionAddr.gpaddr)
  io.exceptionAddr.isForVSnonLeafPTE := Mux(exceptionIsStore, storeQueue.io.exceptionAddr.isForVSnonLeafPTE, loadQueue.io.exceptionAddr.isForVSnonLeafPTE)
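  // note: despite its name, issuePtrExt exports the store queue's
  // store-address-ready pointer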
  io.issuePtrExt := storeQueue.io.stAddrReadySqPtr

  // naive uncache arbiter
  val s_idle :: s_load :: s_store :: Nil = Enum(3)
  val pendingstate = RegInit(s_idle)
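  // s_idle: the uncache channel is free; a fired request claims it for the
  //         owning queue, except nc requests when outstanding accesses are
  //         allowed, which do not occupy the arbiter
  // s_load/s_store: wait for the matching response, then release the channel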

  switch(pendingstate) {
    is(s_idle) {
      when(io.uncache.req.fire) {
        pendingstate :=
          Mux(io.uncacheOutstanding && io.uncache.req.bits.nc, s_idle,
          Mux(loadQueue.io.uncache.req.valid, s_load,
          s_store))
      }
    }
    is(s_load) {
      when(io.uncache.resp.fire) {
        pendingstate := s_idle
      }
    }
    is(s_store) {
      when(io.uncache.resp.fire) {
        pendingstate := s_idle
      }
    }
  }

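  // default tie-offs: neither queue drives the uncache channel until the
  // arbitration logic below grants it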
  loadQueue.io.uncache := DontCare
  storeQueue.io.uncache := DontCare
  loadQueue.io.uncache.req.ready := false.B
  storeQueue.io.uncache.req.ready := false.B
  loadQueue.io.uncache.resp.valid := false.B
  loadQueue.io.uncache.idResp.valid := false.B
  storeQueue.io.uncache.resp.valid := false.B
  storeQueue.io.uncache.idResp.valid := false.B
  when(pendingstate === s_idle) {
    when(loadQueue.io.uncache.req.valid) {
      io.uncache.req <> loadQueue.io.uncache.req
    }.otherwise {
      io.uncache.req <> storeQueue.io.uncache.req
    }
  }.otherwise {
    io.uncache.req.valid := false.B
    io.uncache.req.bits := DontCare
  }
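  // responses and id responses are routed back by their is2lq tag rather
  // than by pendingstate, which also covers outstanding nc accesses that
  // never left s_idle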
  when(io.uncache.resp.bits.is2lq) {
    io.uncache.resp <> loadQueue.io.uncache.resp
  }.otherwise {
    io.uncache.resp <> storeQueue.io.uncache.resp
  }
  when(io.uncache.idResp.bits.is2lq) {
    loadQueue.io.uncache.idResp <> io.uncache.idResp
  }.otherwise {
    storeQueue.io.uncache.idResp <> io.uncache.idResp
  }

  loadQueue.io.debugTopDown <> io.debugTopDown
  loadQueue.io.noUopsIssed := io.noUopsIssued

  assert(!(loadQueue.io.uncache.resp.valid && storeQueue.io.uncache.resp.valid))
  assert(!(loadQueue.io.uncache.idResp.valid && storeQueue.io.uncache.idResp.valid))
  when (!io.uncacheOutstanding) {
    assert(!((loadQueue.io.uncache.resp.valid || storeQueue.io.uncache.resp.valid) && pendingstate === s_idle))
  }

  val perfEvents = Seq(loadQueue, storeQueue).flatMap(_.getPerfEvents)
  generatePerfEvent()
}

class LsqEnqCtrl(implicit p: Parameters) extends XSModule
  with HasVLSUParameters {
  val io = IO(new Bundle {
    val redirect = Flipped(ValidIO(new Redirect))
    // to dispatch
    val enq = new LsqEnqIO
    // from `memBlock.io.lqDeq`
    val lcommit = Input(UInt(log2Up(CommitWidth + 1).W))
    // from `memBlock.io.sqDeq`
    val scommit = Input(UInt(log2Ceil(EnsbufferWidth + 1).W))
    // from/to lsq
    val lqCancelCnt = Input(UInt(log2Up(VirtualLoadQueueSize + 1).W))
    val sqCancelCnt = Input(UInt(log2Up(StoreQueueSize + 1).W))
    val lqFreeCount = Output(UInt(log2Up(VirtualLoadQueueSize + 1).W))
    val sqFreeCount = Output(UInt(log2Up(StoreQueueSize + 1).W))
    val enqLsq = Flipped(new LsqEnqIO)
  })

  val lqPtr = RegInit(0.U.asTypeOf(new LqPtr))
  val sqPtr = RegInit(0.U.asTypeOf(new SqPtr))
  val lqCounter = RegInit(VirtualLoadQueueSize.U(log2Up(VirtualLoadQueueSize + 1).W))
  val sqCounter = RegInit(StoreQueueSize.U(log2Up(StoreQueueSize + 1).W))
  val canAccept = RegInit(false.B)

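  // classify each dispatching uop as load or store (scalar or vector
  // non-segment); numLsElem gives the number of queue entries a uop
  // occupies, so vector uops may claim more than one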
  val blockVec = io.enq.iqAccept.map(!_) :+ true.B
  val numLsElem = io.enq.req.map(_.bits.numLsElem)
  val needEnqLoadQueue = VecInit(io.enq.req.map(x => x.valid && (FuType.isLoad(x.bits.fuType) || FuType.isVNonsegLoad(x.bits.fuType))))
  val needEnqStoreQueue = VecInit(io.enq.req.map(x => x.valid && (FuType.isStore(x.bits.fuType) || FuType.isVNonsegStore(x.bits.fuType))))
  val loadQueueElem = needEnqLoadQueue.zip(numLsElem).map(x => Mux(x._1, x._2, 0.U))
  val storeQueueElem = needEnqStoreQueue.zip(numLsElem).map(x => Mux(x._1, x._2, 0.U))
  val loadFlowPopCount = 0.U +: loadQueueElem.zipWithIndex.map { case (l, i) =>
    loadQueueElem.take(i + 1).reduce(_ +& _).asTypeOf(UInt(elemIdxBits.W))
  }
  val storeFlowPopCount = 0.U +: storeQueueElem.zipWithIndex.map { case (s, i) =>
    storeQueueElem.take(i + 1).reduce(_ +& _).asTypeOf(UInt(elemIdxBits.W))
  }
  val lqAllocNumber = PriorityMux(blockVec.zip(loadFlowPopCount))
  val sqAllocNumber = PriorityMux(blockVec.zip(storeFlowPopCount))
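  // loadFlowPopCount/storeFlowPopCount are prefix sums of the element counts
  // (with a leading 0); PriorityMux stops at the first uop the IQ did not
  // accept, so allocation covers only the accepted prefix of the dispatch
  // group (the trailing true.B in blockVec selects the full sum)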

  io.lqFreeCount := lqCounter
  io.sqFreeCount := sqCounter
  // How to update ptr and counter:
  // (1) by default, update according to enq/commit
  // (2) on redirect, once the dispatch queue is empty, update according to the cancel counts from the LSQ
  val t1_redirect = RegNext(io.redirect.valid)
  val t2_redirect = RegNext(t1_redirect)
  val t2_update = t2_redirect && !VecInit(io.enq.needAlloc.map(_.orR)).asUInt.orR
  val t3_update = RegNext(t2_update)
  val t3_lqCancelCnt = GatedRegNext(io.lqCancelCnt)
  val t3_sqCancelCnt = GatedRegNext(io.sqCancelCnt)
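  // the registered cancel counts align with t3_update, which is asserted
  // two cycles after the redirect train ends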
  when(t3_update) {
    lqPtr := lqPtr - t3_lqCancelCnt
    lqCounter := lqCounter + io.lcommit + t3_lqCancelCnt
    sqPtr := sqPtr - t3_sqCancelCnt
    sqCounter := sqCounter + io.scommit + t3_sqCancelCnt
  }.elsewhen(!io.redirect.valid && io.enq.canAccept) {
    lqPtr := lqPtr + lqAllocNumber
    lqCounter := lqCounter + io.lcommit - lqAllocNumber
    sqPtr := sqPtr + sqAllocNumber
    sqCounter := sqCounter + io.scommit - sqAllocNumber
  }.otherwise {
    lqCounter := lqCounter + io.lcommit
    sqCounter := sqCounter + io.scommit
  }

  // TODO: MaxAllocate and the width of lqOffset/sqOffset need to be discussed
  val lqMaxAllocate = LSQLdEnqWidth
  val sqMaxAllocate = LSQStEnqWidth
  val maxAllocate = lqMaxAllocate max sqMaxAllocate
  val ldCanAccept = lqCounter >= lqAllocNumber +& lqMaxAllocate.U
  val sqCanAccept = sqCounter >= sqAllocNumber +& sqMaxAllocate.U
  // It is possible that t3_update and enq are true in the same clock cycle.
  // For example, if redirect.valid lasts more than one clock cycle, new
  // instructions may enter after the last redirect while the previous
  // redirect has not yet been resolved (i.e. updated with the cancel counts
  // from the LSQ). To solve the issue simply, we block enqueue when
  // t3_update, which is RegNext(t2_update).
  io.enq.canAccept := RegNext(ldCanAccept && sqCanAccept && !t2_update)
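  // each uop's response index is the base pointer plus the prefix sum of
  // elements claimed by older uops in the same dispatch group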
  val lqOffset = Wire(Vec(io.enq.resp.length, UInt(lqPtr.value.getWidth.W)))
  val sqOffset = Wire(Vec(io.enq.resp.length, UInt(sqPtr.value.getWidth.W)))
  for ((resp, i) <- io.enq.resp.zipWithIndex) {
    lqOffset(i) := loadFlowPopCount(i)
    resp.lqIdx := lqPtr + lqOffset(i)
    sqOffset(i) := storeFlowPopCount(i)
    resp.sqIdx := sqPtr + sqOffset(i)
  }

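  // enqueue requests are registered for one cycle on their way to the LSQ;
  // the lqIdx/sqIdx computed above travel with them so the queues need not
  // recompute allocation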
  io.enqLsq.needAlloc := RegNext(io.enq.needAlloc)
  io.enqLsq.iqAccept := RegNext(io.enq.iqAccept)
  io.enqLsq.req.zip(io.enq.req).zip(io.enq.resp).foreach { case ((toLsq, enq), resp) =>
    val do_enq = enq.valid && !io.redirect.valid && io.enq.canAccept
    toLsq.valid := RegNext(do_enq)
    toLsq.bits := RegEnable(enq.bits, do_enq)
    toLsq.bits.lqIdx := RegEnable(resp.lqIdx, do_enq)
    toLsq.bits.sqIdx := RegEnable(resp.sqIdx, do_enq)
  }
}
415