xref: /XiangShan/src/main/scala/xiangshan/mem/pipeline/LoadUnit.scala (revision 26af847e669bb208507278eafc6ebe52f03b0d19)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import utility._
import xiangshan.ExceptionNO._
import xiangshan._
import xiangshan.backend.Bundles.{DynInst, MemExuInput, MemExuOutput}
import xiangshan.backend.fu.PMPRespBundle
import xiangshan.backend.fu.FuConfig._
import xiangshan.backend.ctrlblock.{DebugLsInfoBundle, LsTopdownInfo}
import xiangshan.backend.rob.RobPtr
import xiangshan.backend.fu.util.SdtrigExt

import xiangshan.cache._
import xiangshan.cache.wpu.ReplayCarry
import xiangshan.cache.mmu._
import xiangshan.mem.mdp._

class LoadToLsqReplayIO(implicit p: Parameters) extends XSBundle
  with HasDCacheParameters
  with HasTlbConst
{
  // mshr refill index
  val mshr_id         = UInt(log2Up(cfg.nMissEntries).W)
  // get full data from store queue and sbuffer
  val full_fwd        = Bool()
  // wait for data from store inst's store queue index
  val data_inv_sq_idx = new SqPtr
  // wait for address from store queue index
  val addr_inv_sq_idx = new SqPtr
  // replay carry
  val rep_carry       = new ReplayCarry(nWays)
  // data in last beat
  val last_beat       = Bool()
  // replay cause
  val cause           = Vec(LoadReplayCauses.allCauses, Bool())
  // performance debug information
  val debug           = new PerfDebugInfo
  // tlb hint
  val tlb_id          = UInt(log2Up(loadfiltersize).W)
  val tlb_full        = Bool()

  // alias
  def mem_amb       = cause(LoadReplayCauses.C_MA)
  def tlb_miss      = cause(LoadReplayCauses.C_TM)
  def fwd_fail      = cause(LoadReplayCauses.C_FF)
  def dcache_rep    = cause(LoadReplayCauses.C_DR)
  def dcache_miss   = cause(LoadReplayCauses.C_DM)
  def wpu_fail      = cause(LoadReplayCauses.C_WF)
  def bank_conflict = cause(LoadReplayCauses.C_BC)
  def rar_nack      = cause(LoadReplayCauses.C_RAR)
  def raw_nack      = cause(LoadReplayCauses.C_RAW)
  def nuke          = cause(LoadReplayCauses.C_NK)
  def need_rep      = cause.asUInt.orR
}
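
// The `cause` vector is priority-ordered: a lower index is a higher-priority
// replay cause. A consumer can therefore reduce it to a single reason with a
// priority encode; load stage 3 below does exactly this:
//   val sel = PriorityEncoderOH(rep_info.cause.asUInt)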


class LoadToLsqIO(implicit p: Parameters) extends XSBundle {
  val ldin            = DecoupledIO(new LqWriteBundle)
  val uncache         = Flipped(DecoupledIO(new MemExuOutput))
  val ld_raw_data     = Input(new LoadDataFromLQBundle)
  val forward         = new PipeLoadForwardQueryIO
  val stld_nuke_query = new LoadNukeQueryIO
  val ldld_nuke_query = new LoadNukeQueryIO
  val trigger         = Flipped(new LqTriggerIO)
}

class LoadToLoadIO(implicit p: Parameters) extends XSBundle {
  val valid      = Bool()
  val data       = UInt(XLEN.W) // load to load fast path is limited to ld (64 bit) used as vaddr src1 only
  val dly_ld_err = Bool()
}

class LoadUnitTriggerIO(implicit p: Parameters) extends XSBundle {
  val tdata2      = Input(UInt(64.W))
  val matchType   = Input(UInt(2.W))
  val tEnable     = Input(Bool()) // timing is calculated before this
  val addrHit     = Output(Bool())
}

class LoadUnit(implicit p: Parameters) extends XSModule
  with HasLoadHelper
  with HasPerfEvents
  with HasDCacheParameters
  with HasCircularQueuePtrHelper
  with HasVLSUParameters
  with SdtrigExt
{
  val io = IO(new Bundle() {
    // control
    val redirect      = Flipped(ValidIO(new Redirect))
    val csrCtrl       = Flipped(new CustomCSRCtrlIO)

    // int issue path
    val ldin          = Flipped(Decoupled(new MemExuInput))
    val ldout         = Decoupled(new MemExuOutput)

    // vec issue path
    val vecldin = Flipped(Decoupled(new VecPipeBundle))
    val vecldout = Decoupled(new VecExuOutput)

    // data path
    val tlb           = new TlbRequestIO(2)
    val pmp           = Flipped(new PMPRespBundle()) // arrives at the same time as the tlb resp for now
    val dcache        = new DCacheLoadIO
    val sbuffer       = new LoadForwardQueryIO
    val lsq           = new LoadToLsqIO
    val tl_d_channel  = Input(new DcacheToLduForwardIO)
    val forward_mshr  = Flipped(new LduToMissqueueForwardIO)
    val refill        = Flipped(ValidIO(new Refill))
    val l2_hint       = Input(Valid(new L2ToL1Hint))
    val tlb_hint      = Flipped(new TlbHintReq)
    // fast wakeup
    // TODO: implement vector fast wakeup
    val fast_uop = ValidIO(new DynInst) // early wakeup signal generated in load_s1, sent to RS in load_s2

    // trigger
    val trigger = Vec(TriggerNum, new LoadUnitTriggerIO)

    // prefetch
    val prefetch_train            = ValidIO(new LdPrefetchTrainBundle()) // provide prefetch info to sms
    val prefetch_train_l1         = ValidIO(new LdPrefetchTrainBundle()) // provide prefetch info to stream & stride
    val prefetch_req              = Flipped(ValidIO(new L1PrefetchReq)) // hardware prefetch to l1 cache req
    val canAcceptLowConfPrefetch  = Output(Bool())
    val canAcceptHighConfPrefetch = Output(Bool())

    // load to load fast path
    val l2l_fwd_in    = Input(new LoadToLoadIO)
    val l2l_fwd_out   = Output(new LoadToLoadIO)

    val ld_fast_match    = Input(Bool())
    val ld_fast_fuOpType = Input(UInt())
    val ld_fast_imm      = Input(UInt(12.W))

    // rs feedback
    val wakeup = ValidIO(new DynInst)
    val feedback_fast = ValidIO(new RSFeedback) // stage 2
    val feedback_slow = ValidIO(new RSFeedback) // stage 3
    val ldCancel = Output(new LoadCancelIO()) // used to cancel the uops woken by this load, and to cancel the load itself

    // load ecc error
    val s3_dly_ld_err = Output(Bool()) // Note that io.s3_dly_ld_err and io.lsq.s3_dly_ld_err are different

    // schedule error query
    val stld_nuke_query = Flipped(Vec(StorePipelineWidth, Valid(new StoreNukeQueryIO)))

    // queue-based replay
    val replay       = Flipped(Decoupled(new LsPipelineBundle))
    val lq_rep_full  = Input(Bool())

    // misc
    val s2_ptr_chasing = Output(Bool()) // provide right pc for hw prefetch

    // Load fast replay path
    val fast_rep_in  = Flipped(Decoupled(new LqWriteBundle))
    val fast_rep_out = Decoupled(new LqWriteBundle)

    // Load RAR rollback
    val rollback = Valid(new Redirect)

    // perf
    val debug_ls         = Output(new DebugLsInfoBundle)
    val lsTopdownInfo    = Output(new LsTopdownInfo)
    val correctMissTrain = Input(Bool())
  })

  val s1_ready, s2_ready, s3_ready = WireInit(false.B)

  // Pipeline
  // --------------------------------------------------------------------------------
  // stage 0
  // --------------------------------------------------------------------------------
  // generate addr, use addr to query DCache and DTLB
  val s0_valid         = Wire(Bool())
  val s0_mmio_select   = Wire(Bool())
  val s0_kill          = Wire(Bool())
  val s0_can_go        = s1_ready
  val s0_fire          = s0_valid && s0_can_go
  val s0_mmio_fire     = s0_mmio_select && s0_can_go
  val s0_out           = Wire(new LqWriteBundle)

  // flow source bundle
  class FlowSource extends Bundle {
    val vaddr         = UInt(VAddrBits.W)
    val mask          = UInt((VLEN/8).W)
    val uop           = new DynInst
    val try_l2l       = Bool()
    val has_rob_entry = Bool()
    val rsIdx         = UInt(log2Up(MemIQSizeMax).W)
    val rep_carry     = new ReplayCarry(nWays)
    val mshrid        = UInt(log2Up(cfg.nMissEntries).W)
    val isFirstIssue  = Bool()
    val fast_rep      = Bool()
    val ld_rep        = Bool()
    val l2l_fwd       = Bool()
    val prf           = Bool()
    val prf_rd        = Bool()
    val prf_wr        = Bool()
    val sched_idx     = UInt(log2Up(LoadQueueReplaySize+1).W)
    // Record the issue port idx of load issue queue. This signal is used by load cancel.
    val deqPortIdx    = UInt(log2Ceil(LoadPipelineWidth).W)
    // vec only
    val isvec         = Bool()
    val is128bit      = Bool()
    val uop_unit_stride_fof = Bool()
    val reg_offset    = UInt(vOffsetBits.W)
    val vecActive     = Bool() // 1: vector active element or scalar mem operation, 0: vector inactive element
    val is_first_ele  = Bool()
    // val flowPtr       = new VlflowPtr
    val usSecondInv  = Bool()
  }
  val s0_sel_src = Wire(new FlowSource)

  // load flow select/gen
  // src0: super load replayed by LSQ (cache miss replay) (io.replay)
  // src1: fast load replay (io.fast_rep_in)
  // src2: mmio (io.lsq.uncache)
  // src3: load replayed by LSQ (io.replay)
  // src4: hardware prefetch from prefetcher (high confidence) (io.prefetch_req)
  // NOTE: Now vec/int loads are sent from the same RS.
  //       A vec load will be split into multiple uops,
  //       so as long as one uop is issued,
  //       the other uops should have higher priority
  // src5: vec read from RS (io.vecldin)
  // src6: int read / software prefetch first issue from RS (io.ldin)
  // src7: load trying pointer chasing when there is no issued or replayed load (io.l2l_fwd_in)
  // src8: hardware prefetch from prefetcher (low confidence) (io.prefetch_req)
  // priority: high to low
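  // Fixed-priority arbitration: each source's ready is simply the NOR of all
  // strictly higher-priority valids, so at most one source is selected per cycle.
  // Sketch of the pattern implemented below (srcN's terms):
  //   s0_srcN_ready  = !s0_src0_valid && ... && !s0_srcN-1_valid
  //   s0_srcN_select = s0_srcN_valid && s0_srcN_ready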
  val s0_rep_stall           = io.ldin.valid && isAfter(io.replay.bits.uop.robIdx, io.ldin.bits.uop.robIdx)
  val s0_super_ld_rep_valid  = io.replay.valid && io.replay.bits.forward_tlDchannel
  val s0_ld_fast_rep_valid   = io.fast_rep_in.valid
  val s0_ld_mmio_valid       = io.lsq.uncache.valid
  val s0_ld_rep_valid        = io.replay.valid && !io.replay.bits.forward_tlDchannel && !s0_rep_stall
  val s0_high_conf_prf_valid = io.prefetch_req.valid && io.prefetch_req.bits.confidence > 0.U
  val s0_vec_iss_valid       = io.vecldin.valid
  val s0_int_iss_valid       = io.ldin.valid // int flow first issue or software prefetch
  val s0_l2l_fwd_valid       = io.l2l_fwd_in.valid
  val s0_low_conf_prf_valid  = io.prefetch_req.valid && io.prefetch_req.bits.confidence === 0.U
  val s0_is128bit            = is128Bit(io.vecldin.bits.alignedType) && io.vecldin.valid
  dontTouch(s0_super_ld_rep_valid)
  dontTouch(s0_ld_fast_rep_valid)
  dontTouch(s0_ld_mmio_valid)
  dontTouch(s0_ld_rep_valid)
  dontTouch(s0_high_conf_prf_valid)
  dontTouch(s0_vec_iss_valid)
  dontTouch(s0_int_iss_valid)
  dontTouch(s0_l2l_fwd_valid)
  dontTouch(s0_low_conf_prf_valid)

  // load flow source ready
  val s0_super_ld_rep_ready  = WireInit(true.B)
  val s0_ld_fast_rep_ready   = !s0_super_ld_rep_valid
  val s0_ld_mmio_ready       = !s0_super_ld_rep_valid &&
                               !s0_ld_fast_rep_valid
  val s0_ld_rep_ready        = !s0_super_ld_rep_valid &&
                               !s0_ld_fast_rep_valid &&
                               !s0_ld_mmio_valid
  val s0_high_conf_prf_ready = !s0_super_ld_rep_valid &&
                               !s0_ld_fast_rep_valid &&
                               !s0_ld_mmio_valid &&
                               !s0_ld_rep_valid

  val s0_vec_iss_ready       = !s0_super_ld_rep_valid &&
                               !s0_ld_fast_rep_valid &&
                               !s0_ld_mmio_valid &&
                               !s0_ld_rep_valid &&
                               !s0_high_conf_prf_valid

  val s0_int_iss_ready       = !s0_super_ld_rep_valid &&
                               !s0_ld_fast_rep_valid &&
                               !s0_ld_mmio_valid &&
                               !s0_ld_rep_valid &&
                               !s0_high_conf_prf_valid &&
                               !s0_vec_iss_valid

  val s0_l2l_fwd_ready       = !s0_super_ld_rep_valid &&
                               !s0_ld_fast_rep_valid &&
                               !s0_ld_mmio_valid &&
                               !s0_ld_rep_valid &&
                               !s0_high_conf_prf_valid &&
                               !s0_int_iss_valid &&
                               !s0_vec_iss_valid

  val s0_low_conf_prf_ready  = !s0_super_ld_rep_valid &&
                               !s0_ld_fast_rep_valid &&
                               !s0_ld_mmio_valid &&
                               !s0_ld_rep_valid &&
                               !s0_high_conf_prf_valid &&
                               !s0_int_iss_valid &&
                               !s0_vec_iss_valid &&
                               !s0_l2l_fwd_valid
  dontTouch(s0_super_ld_rep_ready)
  dontTouch(s0_ld_fast_rep_ready)
  dontTouch(s0_ld_mmio_ready)
  dontTouch(s0_ld_rep_ready)
  dontTouch(s0_high_conf_prf_ready)
  dontTouch(s0_vec_iss_ready)
  dontTouch(s0_int_iss_ready)
  dontTouch(s0_l2l_fwd_ready)
  dontTouch(s0_low_conf_prf_ready)

  // load flow source select (OH)
  val s0_super_ld_rep_select = s0_super_ld_rep_valid && s0_super_ld_rep_ready
  val s0_ld_fast_rep_select  = s0_ld_fast_rep_valid && s0_ld_fast_rep_ready
  val s0_ld_mmio_select      = s0_ld_mmio_valid && s0_ld_mmio_ready
  val s0_ld_rep_select       = s0_ld_rep_valid && s0_ld_rep_ready
  val s0_hw_prf_select       = s0_high_conf_prf_ready && s0_high_conf_prf_valid ||
                               s0_low_conf_prf_ready && s0_low_conf_prf_valid
  val s0_vec_iss_select      = s0_vec_iss_ready && s0_vec_iss_valid
  val s0_int_iss_select      = s0_int_iss_ready && s0_int_iss_valid
  val s0_l2l_fwd_select      = s0_l2l_fwd_ready && s0_l2l_fwd_valid
  dontTouch(s0_super_ld_rep_select)
  dontTouch(s0_ld_fast_rep_select)
  dontTouch(s0_ld_mmio_select)
  dontTouch(s0_ld_rep_select)
  dontTouch(s0_hw_prf_select)
  dontTouch(s0_vec_iss_select)
  dontTouch(s0_int_iss_select)
  dontTouch(s0_l2l_fwd_select)

  s0_valid := (s0_super_ld_rep_valid ||
               s0_ld_fast_rep_valid ||
               s0_ld_rep_valid ||
               s0_high_conf_prf_valid ||
               s0_vec_iss_valid ||
               s0_int_iss_valid ||
               s0_l2l_fwd_valid ||
               s0_low_conf_prf_valid) && !s0_ld_mmio_select && io.dcache.req.ready && !s0_kill

  s0_mmio_select := s0_ld_mmio_select && !s0_kill

  // pointer chasing is actually tried only when s0's out is ready and dcache is ready
  val s0_try_ptr_chasing      = s0_l2l_fwd_select
  val s0_do_try_ptr_chasing   = s0_try_ptr_chasing && s0_can_go && io.dcache.req.ready
  val s0_ptr_chasing_vaddr    = io.l2l_fwd_in.data(5, 0) +& io.ld_fast_imm(5, 0)
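  // Note: `+&` is Chisel's carry-preserving add, so s0_ptr_chasing_vaddr is 7 bits
  // wide. Bit 6 (the carry-out) is checked in s1 (s1_addr_mismatch below) to detect
  // that adding the low 6 offset bits crossed a 64-byte boundary, i.e. changed the cache set.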
  val s0_ptr_chasing_canceled = WireInit(false.B)
  s0_kill := s0_ptr_chasing_canceled

  // prefetch related ctrl signal
  io.canAcceptLowConfPrefetch  := s0_low_conf_prf_ready
  io.canAcceptHighConfPrefetch := s0_high_conf_prf_ready

  // query DTLB
  io.tlb.req.valid                   := s0_valid
  io.tlb.req.bits.cmd                := Mux(s0_sel_src.prf,
                                         Mux(s0_sel_src.prf_wr, TlbCmd.write, TlbCmd.read),
                                         TlbCmd.read
                                       )
  io.tlb.req.bits.vaddr              := Mux(s0_hw_prf_select, io.prefetch_req.bits.paddr, s0_sel_src.vaddr)
  io.tlb.req.bits.size               := Mux(s0_sel_src.isvec, io.vecldin.bits.alignedType(2,0), LSUOpType.size(s0_sel_src.uop.fuOpType)) // FIXME: currently unused; a 128-bit load will error if it is used
  io.tlb.req.bits.kill               := s0_kill
  io.tlb.req.bits.memidx.is_ld       := true.B
  io.tlb.req.bits.memidx.is_st       := false.B
  io.tlb.req.bits.memidx.idx         := s0_sel_src.uop.lqIdx.value
  io.tlb.req.bits.debug.robIdx       := s0_sel_src.uop.robIdx
  io.tlb.req.bits.no_translate       := s0_hw_prf_select  // hw prefetch addr does not need to be translated
  io.tlb.req.bits.debug.pc           := s0_sel_src.uop.pc
  io.tlb.req.bits.debug.isFirstIssue := s0_sel_src.isFirstIssue

  // query DCache
  io.dcache.req.valid             := s0_valid
  io.dcache.req.bits.cmd          := Mux(s0_sel_src.prf_rd,
                                      MemoryOpConstants.M_PFR,
                                      Mux(s0_sel_src.prf_wr, MemoryOpConstants.M_PFW, MemoryOpConstants.M_XRD)
                                    )
  io.dcache.req.bits.vaddr        := s0_sel_src.vaddr
  io.dcache.req.bits.mask         := s0_sel_src.mask
  io.dcache.req.bits.data         := DontCare
  io.dcache.req.bits.isFirstIssue := s0_sel_src.isFirstIssue
  io.dcache.req.bits.instrtype    := Mux(s0_sel_src.prf, DCACHE_PREFETCH_SOURCE.U, LOAD_SOURCE.U)
  io.dcache.req.bits.debug_robIdx := s0_sel_src.uop.robIdx.value
  io.dcache.req.bits.replayCarry  := s0_sel_src.rep_carry
  io.dcache.req.bits.id           := DontCare // TODO: update cache meta
  io.dcache.pf_source             := Mux(s0_hw_prf_select, io.prefetch_req.bits.pf_source.value, L1_HW_PREFETCH_NULL)
  io.dcache.is128Req              := s0_is128bit && s0_vec_iss_select

  // load flow priority mux
  def fromNullSource(): FlowSource = {
    val out = WireInit(0.U.asTypeOf(new FlowSource))
    out
  }

  def fromFastReplaySource(src: LqWriteBundle): FlowSource = {
    val out = WireInit(0.U.asTypeOf(new FlowSource))
    out.vaddr         := src.vaddr
    out.mask          := src.mask
    out.uop           := src.uop
    out.try_l2l       := false.B
    out.has_rob_entry := src.hasROBEntry
    out.rep_carry     := src.rep_info.rep_carry
    out.mshrid        := src.rep_info.mshr_id
    out.rsIdx         := src.rsIdx
    out.isFirstIssue  := false.B
    out.fast_rep      := true.B
    out.ld_rep        := src.isLoadReplay
    out.l2l_fwd       := false.B
    out.prf           := LSUOpType.isPrefetch(src.uop.fuOpType)
    out.prf_rd        := src.uop.fuOpType === LSUOpType.prefetch_r
    out.prf_wr        := src.uop.fuOpType === LSUOpType.prefetch_w
    out.sched_idx     := src.schedIndex
    out.vecActive     := true.B // true for scalar load
    out
  }

  def fromMmioSource(src: MemExuOutput): FlowSource = {
    val out = WireInit(0.U.asTypeOf(new FlowSource))
    out.vaddr         := 0.U
    out.mask          := 0.U
    out.uop           := src.uop
    out.try_l2l       := false.B
    out.has_rob_entry := false.B
    out.rsIdx         := 0.U
    out.rep_carry     := 0.U.asTypeOf(out.rep_carry)
    out.mshrid        := 0.U
    out.isFirstIssue  := false.B
    out.fast_rep      := false.B
    out.ld_rep        := false.B
    out.l2l_fwd       := false.B
    out.prf           := false.B
    out.prf_rd        := false.B
    out.prf_wr        := false.B
    out.sched_idx     := 0.U
    out.vecActive     := true.B
    out
  }

  def fromNormalReplaySource(src: LsPipelineBundle): FlowSource = {
    val out = WireInit(0.U.asTypeOf(new FlowSource))
    out.vaddr         := src.vaddr
    out.mask          := genVWmask(src.vaddr, src.uop.fuOpType(1, 0))
    out.uop           := src.uop
    out.try_l2l       := false.B
    out.has_rob_entry := true.B
    out.rsIdx         := src.rsIdx
    out.rep_carry     := src.replayCarry
    out.mshrid        := src.mshrid
    out.isFirstIssue  := false.B
    out.fast_rep      := false.B
    out.ld_rep        := true.B
    out.l2l_fwd       := false.B
    out.prf           := LSUOpType.isPrefetch(src.uop.fuOpType)
    out.prf_rd        := src.uop.fuOpType === LSUOpType.prefetch_r
    out.prf_wr        := src.uop.fuOpType === LSUOpType.prefetch_w
    out.sched_idx     := src.schedIndex
    out.vecActive     := true.B // true for scalar load
    out
  }

  def fromPrefetchSource(src: L1PrefetchReq): FlowSource = {
    val out = WireInit(0.U.asTypeOf(new FlowSource))
    out.vaddr         := src.getVaddr()
    out.mask          := 0.U
    out.uop           := DontCare
    out.try_l2l       := false.B
    out.has_rob_entry := false.B
    out.rsIdx         := 0.U
    out.rep_carry     := 0.U.asTypeOf(out.rep_carry)
    out.mshrid        := 0.U
    out.isFirstIssue  := false.B
    out.fast_rep      := false.B
    out.ld_rep        := false.B
    out.l2l_fwd       := false.B
    out.prf           := true.B
    out.prf_rd        := !src.is_store
    out.prf_wr        := src.is_store
    out.sched_idx     := 0.U
    out.vecActive     := true.B // true for scalar load
    out
  }

  def fromVecIssueSource(src: VecPipeBundle): FlowSource = {
    val out = WireInit(0.U.asTypeOf(new FlowSource))
    out.vaddr         := src.vaddr
    out.mask          := src.mask
    out.uop           := src.uop
    out.try_l2l       := false.B
    out.has_rob_entry := true.B
    // TODO: VLSU, implement vector feedback
    out.rsIdx         := 0.U
    // TODO: VLSU, implement replay carry
    out.rep_carry     := 0.U.asTypeOf(out.rep_carry)
    out.mshrid        := 0.U
    // TODO: VLSU, implement first issue
//    out.isFirstIssue  := src.isFirstIssue
    out.fast_rep      := false.B
    out.ld_rep        := false.B
    out.l2l_fwd       := false.B
    out.prf           := false.B
    out.prf_rd        := false.B
    out.prf_wr        := false.B
    out.sched_idx     := 0.U
    // Vector load interface
    out.isvec               := true.B
    // whether this element group uses the 128-bit access path
    out.is128bit            := is128Bit(src.alignedType)
    out.uop_unit_stride_fof := src.uop_unit_stride_fof
    // out.rob_idx_valid       := src.rob_idx_valid
    // out.inner_idx           := src.inner_idx
    // out.rob_idx             := src.rob_idx
    out.reg_offset          := src.reg_offset
    // out.offset              := src.offset
    out.vecActive           := src.vecActive
    out.is_first_ele        := src.is_first_ele
    // out.flowPtr             := src.flowPtr
    out.usSecondInv         := src.usSecondInv
    out
  }

  def fromIntIssueSource(src: MemExuInput): FlowSource = {
    val out = WireInit(0.U.asTypeOf(new FlowSource))
    out.vaddr         := src.src(0) + SignExt(src.uop.imm(11, 0), VAddrBits)
    out.mask          := genVWmask(out.vaddr, src.uop.fuOpType(1,0))
    out.uop           := src.uop
    out.try_l2l       := false.B
    out.has_rob_entry := true.B
    out.rsIdx         := src.iqIdx
    out.rep_carry     := 0.U.asTypeOf(out.rep_carry)
    out.mshrid        := 0.U
    out.isFirstIssue  := true.B
    out.fast_rep      := false.B
    out.ld_rep        := false.B
    out.l2l_fwd       := false.B
    out.prf           := LSUOpType.isPrefetch(src.uop.fuOpType)
    out.prf_rd        := src.uop.fuOpType === LSUOpType.prefetch_r
    out.prf_wr        := src.uop.fuOpType === LSUOpType.prefetch_w
    out.sched_idx     := 0.U
    out.vecActive     := true.B // true for scalar load
    out
  }

  def fromLoadToLoadSource(src: LoadToLoadIO): FlowSource = {
    val out = WireInit(0.U.asTypeOf(new FlowSource))
    out.vaddr              := Cat(src.data(XLEN-1, 6), s0_ptr_chasing_vaddr(5,0))
    out.mask               := genVWmask(0.U, LSUOpType.ld)
    // When there's no valid instruction from RS and LSQ, we try the load-to-load forwarding.
    // Assume the pointer chasing is always ld.
    out.uop.fuOpType       := LSUOpType.ld
    out.try_l2l            := true.B
    // we don't care about out.isFirstIssue, out.rsIdx or s0_sqIdx in S0 when trying pointer chasing,
    // because these signals will be updated in S1
    out.has_rob_entry      := false.B
    out.rsIdx              := 0.U
    out.mshrid             := 0.U
    out.rep_carry          := 0.U.asTypeOf(out.rep_carry)
    out.isFirstIssue       := true.B
    out.fast_rep           := false.B
    out.ld_rep             := false.B
    out.l2l_fwd            := true.B
    out.prf                := false.B
    out.prf_rd             := false.B
    out.prf_wr             := false.B
    out.sched_idx          := 0.U
    out.vecActive          := true.B // true for scalar load
    out
  }

  // set default
  val s0_src_selector = Seq(
    s0_super_ld_rep_select,
    s0_ld_fast_rep_select,
    s0_ld_mmio_select,
    s0_ld_rep_select,
    s0_hw_prf_select,
    s0_vec_iss_select,
    s0_int_iss_select,
    (if (EnableLoadToLoadForward) s0_l2l_fwd_select else true.B)
  )
  val s0_src_format = Seq(
    fromNormalReplaySource(io.replay.bits),
    fromFastReplaySource(io.fast_rep_in.bits),
    fromMmioSource(io.lsq.uncache.bits),
    fromNormalReplaySource(io.replay.bits),
    fromPrefetchSource(io.prefetch_req.bits),
    fromVecIssueSource(io.vecldin.bits),
    fromIntIssueSource(io.ldin.bits),
    (if (EnableLoadToLoadForward) fromLoadToLoadSource(io.l2l_fwd_in) else fromNullSource())
  )
  s0_sel_src := ParallelPriorityMux(s0_src_selector, s0_src_format)

  // address align check
  val s0_addr_aligned = LookupTree(Mux(s0_sel_src.isvec, io.vecldin.bits.alignedType(1,0), s0_sel_src.uop.fuOpType(1, 0)), List(
    "b00".U   -> true.B,                           //b
    "b01".U   -> (s0_sel_src.vaddr(0)    === 0.U), //h
    "b10".U   -> (s0_sel_src.vaddr(1, 0) === 0.U), //w
    "b11".U   -> (s0_sel_src.vaddr(2, 0) === 0.U)  //d
  ))
  XSError(s0_sel_src.isvec && s0_sel_src.vaddr(3, 0) =/= 0.U && io.vecldin.bits.alignedType(2), "unit-stride 128 bit element is not aligned!")
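  // Worked example: a word access (size code b10) at vaddr 0x...6 has
  // vaddr(1, 0) = 2 =/= 0, so s0_addr_aligned is false and the
  // loadAddrMisaligned exception is raised on s0_out below.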

  // accept load flow if dcache ready (tlb is always ready)
  // TODO: prefetch need writeback to loadQueueFlag
  s0_out               := DontCare
  s0_out.rsIdx         := s0_sel_src.rsIdx
  s0_out.vaddr         := s0_sel_src.vaddr
  s0_out.mask          := s0_sel_src.mask
  s0_out.uop           := s0_sel_src.uop
  s0_out.isFirstIssue  := s0_sel_src.isFirstIssue
  s0_out.hasROBEntry   := s0_sel_src.has_rob_entry
  s0_out.isPrefetch    := s0_sel_src.prf
  s0_out.isHWPrefetch  := s0_hw_prf_select
  s0_out.isFastReplay  := s0_sel_src.fast_rep
  s0_out.isLoadReplay  := s0_sel_src.ld_rep
  s0_out.isFastPath    := s0_sel_src.l2l_fwd
  s0_out.mshrid        := s0_sel_src.mshrid
  s0_out.isvec         := s0_sel_src.isvec
  s0_out.is128bit      := s0_sel_src.is128bit
  s0_out.uop_unit_stride_fof := s0_sel_src.uop_unit_stride_fof
  // s0_out.rob_idx_valid   := s0_rob_idx_valid
  // s0_out.inner_idx       := s0_inner_idx
  // s0_out.rob_idx         := s0_rob_idx
  s0_out.reg_offset    := s0_sel_src.reg_offset
  // s0_out.offset          := s0_offset
  s0_out.vecActive     := s0_sel_src.vecActive
  s0_out.usSecondInv   := s0_sel_src.usSecondInv
  s0_out.is_first_ele  := s0_sel_src.is_first_ele
  // s0_out.flowPtr         := s0_sel_src.flowPtr
  s0_out.uop.exceptionVec(loadAddrMisaligned) := !s0_addr_aligned && s0_sel_src.vecActive
  s0_out.forward_tlDchannel := s0_super_ld_rep_select
  when(io.tlb.req.valid && s0_sel_src.isFirstIssue) {
    s0_out.uop.debugInfo.tlbFirstReqTime := GTimer()
  }.otherwise {
    s0_out.uop.debugInfo.tlbFirstReqTime := s0_sel_src.uop.debugInfo.tlbFirstReqTime
  }
  s0_out.schedIndex    := s0_sel_src.sched_idx

  // load fast replay
  io.fast_rep_in.ready := (s0_can_go && io.dcache.req.ready && s0_ld_fast_rep_ready)

  // mmio
  io.lsq.uncache.ready := s0_mmio_fire

  // load flow source ready
  // cache missed load has highest priority
  // always accept cache missed load flow from load replay queue
  io.replay.ready := (s0_can_go && io.dcache.req.ready && (s0_ld_rep_ready && !s0_rep_stall || s0_super_ld_rep_select))

  // accept load flow from rs when:
  // 1) there is no lsq-replayed load
  // 2) there is no fast replayed load
  // 3) there is no high confidence prefetch request
  io.vecldin.ready := s0_can_go && io.dcache.req.ready && s0_vec_iss_ready
  io.ldin.ready := s0_can_go && io.dcache.req.ready && s0_int_iss_ready

  // for hw prefetch load flow feedback, to be added later
  // io.prefetch_in.ready := s0_hw_prf_select

  // dcache replacement extra info
  // TODO: should prefetch load update replacement?
  io.dcache.replacementUpdated := Mux(s0_ld_rep_select || s0_super_ld_rep_select, io.replay.bits.replacementUpdated, false.B)

  // load wakeup
  // TODO: vector load wakeup?
  io.wakeup.valid := s0_fire && (s0_super_ld_rep_select || s0_ld_fast_rep_select || s0_ld_rep_select || s0_int_iss_select) || s0_mmio_fire
  io.wakeup.bits := s0_out.uop

  XSDebug(io.dcache.req.fire,
    p"[DCACHE LOAD REQ] pc ${Hexadecimal(s0_sel_src.uop.pc)}, vaddr ${Hexadecimal(s0_sel_src.vaddr)}\n"
  )
  XSDebug(s0_valid,
    p"S0: pc ${Hexadecimal(s0_out.uop.pc)}, lId ${Hexadecimal(s0_out.uop.lqIdx.asUInt)}, " +
    p"vaddr ${Hexadecimal(s0_out.vaddr)}, mask ${Hexadecimal(s0_out.mask)}\n")

  // Pipeline
  // --------------------------------------------------------------------------------
  // stage 1
  // --------------------------------------------------------------------------------
  // TLB resp (send paddr to dcache)
  val s1_valid      = RegInit(false.B)
  val s1_in         = Wire(new LqWriteBundle)
  val s1_out        = Wire(new LqWriteBundle)
  val s1_kill       = Wire(Bool())
  val s1_can_go     = s2_ready
  val s1_fire       = s1_valid && !s1_kill && s1_can_go
  val s1_vecActive  = RegEnable(s0_out.vecActive, true.B, s0_fire)
  val s1_vec_alignedType = RegEnable(io.vecldin.bits.alignedType, s0_fire)

  s1_ready := !s1_valid || s1_kill || s2_ready
  when (s0_fire) { s1_valid := true.B }
  .elsewhen (s1_fire) { s1_valid := false.B }
  .elsewhen (s1_kill) { s1_valid := false.B }
  s1_in   := RegEnable(s0_out, s0_fire)

  val s1_fast_rep_dly_kill = RegNext(io.fast_rep_in.bits.lateKill) && s1_in.isFastReplay
  val s1_fast_rep_dly_err  = RegNext(io.fast_rep_in.bits.delayedLoadError) && s1_in.isFastReplay
  val s1_l2l_fwd_dly_err   = RegNext(io.l2l_fwd_in.dly_ld_err) && s1_in.isFastPath
  val s1_dly_err           = s1_fast_rep_dly_err || s1_l2l_fwd_dly_err
  val s1_vaddr_hi          = Wire(UInt())
  val s1_vaddr_lo          = Wire(UInt())
  val s1_vaddr             = Wire(UInt())
  val s1_paddr_dup_lsu     = Wire(UInt())
  val s1_paddr_dup_dcache  = Wire(UInt())
  val s1_exception         = ExceptionNO.selectByFu(s1_out.uop.exceptionVec, LduCfg).asUInt.orR   // af & pf exceptions are modified below
  val s1_tlb_miss          = io.tlb.resp.bits.miss
  val s1_prf               = s1_in.isPrefetch
  val s1_hw_prf            = s1_in.isHWPrefetch
  val s1_sw_prf            = s1_prf && !s1_hw_prf
  val s1_tlb_memidx        = io.tlb.resp.bits.memidx

  s1_vaddr_hi         := s1_in.vaddr(VAddrBits - 1, 6)
  s1_vaddr_lo         := s1_in.vaddr(5, 0)
  s1_vaddr            := Cat(s1_vaddr_hi, s1_vaddr_lo)
  s1_paddr_dup_lsu    := io.tlb.resp.bits.paddr(0)
  s1_paddr_dup_dcache := io.tlb.resp.bits.paddr(1)

  when (s1_tlb_memidx.is_ld && io.tlb.resp.valid && !s1_tlb_miss && s1_tlb_memidx.idx === s1_in.uop.lqIdx.value) {
    // printf("load idx = %d\n", s1_tlb_memidx.idx)
    s1_out.uop.debugInfo.tlbRespTime := GTimer()
  }

  io.tlb.req_kill   := s1_kill || s1_dly_err
  io.tlb.resp.ready := true.B

  io.dcache.s1_paddr_dup_lsu    <> s1_paddr_dup_lsu
  io.dcache.s1_paddr_dup_dcache <> s1_paddr_dup_dcache
  io.dcache.s1_kill             := s1_kill || s1_dly_err || s1_tlb_miss || s1_exception

  // store to load forwarding
  io.sbuffer.valid := s1_valid && !(s1_exception || s1_tlb_miss || s1_kill || s1_dly_err || s1_prf)
  io.sbuffer.vaddr := s1_vaddr
  io.sbuffer.paddr := s1_paddr_dup_lsu
  io.sbuffer.uop   := s1_in.uop
  io.sbuffer.sqIdx := s1_in.uop.sqIdx
  io.sbuffer.mask  := s1_in.mask
  io.sbuffer.pc    := s1_in.uop.pc // FIXME: remove it

  io.lsq.forward.valid     := s1_valid && !(s1_exception || s1_tlb_miss || s1_kill || s1_dly_err || s1_prf)
  io.lsq.forward.vaddr     := s1_vaddr
  io.lsq.forward.paddr     := s1_paddr_dup_lsu
  io.lsq.forward.uop       := s1_in.uop
  io.lsq.forward.sqIdx     := s1_in.uop.sqIdx
  io.lsq.forward.sqIdxMask := 0.U
  io.lsq.forward.mask      := s1_in.mask
  io.lsq.forward.pc        := s1_in.uop.pc // FIXME: remove it

  // st-ld violation query
  val s1_nuke_paddr_match = VecInit((0 until StorePipelineWidth).map(w => {Mux(s1_in.isvec && s1_in.is128bit,
    s1_paddr_dup_lsu(PAddrBits-1, 4) === io.stld_nuke_query(w).bits.paddr(PAddrBits-1, 4),
    s1_paddr_dup_lsu(PAddrBits-1, 3) === io.stld_nuke_query(w).bits.paddr(PAddrBits-1, 3))}))
  val s1_nuke = VecInit((0 until StorePipelineWidth).map(w => {
                       io.stld_nuke_query(w).valid && // query valid
                       isAfter(s1_in.uop.robIdx, io.stld_nuke_query(w).bits.robIdx) && // older store
                       s1_nuke_paddr_match(w) && // paddr match
                       (s1_in.mask & io.stld_nuke_query(w).bits.mask).orR // data mask contain
                      })).asUInt.orR && !s1_tlb_miss
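  // The paddr compare above works at doubleword granularity (quadword for 128-bit
  // vector accesses); the mask AND then refines it to bytes. E.g. a load with mask
  // b00001111 and a store with mask b11110000 share a doubleword but do not
  // overlap, so (s1_in.mask & store_mask).orR is false and no nuke is reported.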

  s1_out                   := s1_in
  s1_out.vaddr             := s1_vaddr
  s1_out.paddr             := s1_paddr_dup_lsu
  s1_out.tlbMiss           := s1_tlb_miss
  s1_out.ptwBack           := io.tlb.resp.bits.ptwBack
  s1_out.rsIdx             := s1_in.rsIdx
  s1_out.rep_info.debug    := s1_in.uop.debugInfo
  s1_out.rep_info.nuke     := s1_nuke && !s1_sw_prf
  s1_out.delayedLoadError  := s1_dly_err

  when (!s1_dly_err) {
    // the current ori test will cause the case of ldest == 0; this will be modified in the future
    // af & pf exceptions are modified here
    s1_out.uop.exceptionVec(loadPageFault)   := io.tlb.resp.bits.excp(0).pf.ld && s1_vecActive && !s1_tlb_miss
    s1_out.uop.exceptionVec(loadAccessFault) := io.tlb.resp.bits.excp(0).af.ld && s1_vecActive && !s1_tlb_miss
  } .otherwise {
    s1_out.uop.exceptionVec(loadPageFault)      := false.B
    s1_out.uop.exceptionVec(loadAddrMisaligned) := false.B
    s1_out.uop.exceptionVec(loadAccessFault)    := s1_dly_err && s1_vecActive
  }

  // pointer chasing
  val s1_try_ptr_chasing       = RegNext(s0_do_try_ptr_chasing, false.B)
  val s1_ptr_chasing_vaddr     = RegEnable(s0_ptr_chasing_vaddr, s0_do_try_ptr_chasing)
  val s1_fu_op_type_not_ld     = WireInit(false.B)
  val s1_not_fast_match        = WireInit(false.B)
  val s1_addr_mismatch         = WireInit(false.B)
  val s1_addr_misaligned       = WireInit(false.B)
  val s1_fast_mismatch         = WireInit(false.B)
  val s1_ptr_chasing_canceled  = WireInit(false.B)
  val s1_cancel_ptr_chasing    = WireInit(false.B)

  s1_kill := s1_fast_rep_dly_kill ||
             s1_cancel_ptr_chasing ||
             s1_in.uop.robIdx.needFlush(io.redirect) ||
            (s1_in.uop.robIdx.needFlush(RegNext(io.redirect)) && !RegNext(s0_try_ptr_chasing)) ||
             RegEnable(s0_kill, false.B, io.ldin.valid || io.vecldin.valid || io.replay.valid || io.l2l_fwd_in.valid || io.fast_rep_in.valid)

  if (EnableLoadToLoadForward) {
    // Sometimes, we need to cancel the load-load forwarding.
    // These can be put at S0 if timing is bad at S1.
    // Case 0: CACHE_SET(base + offset) != CACHE_SET(base) (lowest 6-bit addition has an overflow)
    s1_addr_mismatch     := s1_ptr_chasing_vaddr(6) ||
                             RegEnable(io.ld_fast_imm(11, 6).orR, s0_do_try_ptr_chasing)
    // Case 1: the address is not 64-bit aligned or the fuOpType is not LD
    s1_addr_misaligned := s1_ptr_chasing_vaddr(2, 0).orR
    s1_fu_op_type_not_ld := io.ldin.bits.uop.fuOpType =/= LSUOpType.ld
    // Case 2: this load-load uop is cancelled
    s1_ptr_chasing_canceled := !io.ldin.valid
    // Case 3: fast mismatch
    s1_fast_mismatch := RegEnable(!io.ld_fast_match, s0_do_try_ptr_chasing)

    when (s1_try_ptr_chasing) {
      s1_cancel_ptr_chasing := s1_addr_mismatch ||
                               s1_addr_misaligned ||
                               s1_fu_op_type_not_ld ||
                               s1_ptr_chasing_canceled ||
                               s1_fast_mismatch

      s1_in.uop           := io.ldin.bits.uop
      s1_in.rsIdx         := io.ldin.bits.iqIdx
      s1_in.isFirstIssue  := io.ldin.bits.isFirstIssue
      s1_vaddr_lo         := s1_ptr_chasing_vaddr(5, 0)
      s1_paddr_dup_lsu    := Cat(io.tlb.resp.bits.paddr(0)(PAddrBits - 1, 6), s1_vaddr_lo)
      s1_paddr_dup_dcache := Cat(io.tlb.resp.bits.paddr(0)(PAddrBits - 1, 6), s1_vaddr_lo)

      // record tlb access time when we get the data, to keep the latency calculation correct
      // (it should not really be recorded here, because this path does not use the tlb)
      s1_in.uop.debugInfo.tlbFirstReqTime := GTimer()
      s1_in.uop.debugInfo.tlbRespTime     := GTimer()
    }
    when (!s1_cancel_ptr_chasing) {
      s0_ptr_chasing_canceled := s1_try_ptr_chasing && !io.replay.fire && !io.fast_rep_in.fire
      when (s1_try_ptr_chasing) {
        io.ldin.ready := true.B
      }
    }
  }

  // pre-calculate sqIdx mask in s0, then send it to lsq in s1 for forwarding
  val s1_sqIdx_mask = RegNext(UIntToMask(s0_out.uop.sqIdx.value, StoreQueueSize))
  // to enable load-load, sqIdxMask must be calculated based on ldin.uop
  // If the timing here is not OK, load-load forwarding has to be disabled.
  // Or we calculate sqIdxMask at RS??
  io.lsq.forward.sqIdxMask := s1_sqIdx_mask
  if (EnableLoadToLoadForward) {
    when (s1_try_ptr_chasing) {
      io.lsq.forward.sqIdxMask := UIntToMask(io.ldin.bits.uop.sqIdx.value, StoreQueueSize)
    }
  }
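  // UIntToMask(x, n) produces an n-bit mask with the x least-significant bits set
  // (e.g. UIntToMask(3.U, 8) = b00000111), so sqIdxMask marks the store queue
  // entries older than this load for the forwarding lookup (queue wrap-around is
  // handled on the store queue side).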

  io.forward_mshr.valid  := s1_valid && s1_out.forward_tlDchannel
  io.forward_mshr.mshrid := s1_out.mshrid
  io.forward_mshr.paddr  := s1_out.paddr

  XSDebug(s1_valid,
    p"S1: pc ${Hexadecimal(s1_out.uop.pc)}, lId ${Hexadecimal(s1_out.uop.lqIdx.asUInt)}, tlb_miss ${io.tlb.resp.bits.miss}, " +
    p"paddr ${Hexadecimal(s1_out.paddr)}, mmio ${s1_out.mmio}\n")

  // Pipeline
  // --------------------------------------------------------------------------------
  // stage 2
  // --------------------------------------------------------------------------------
  // s2: DCache resp
  val s2_valid  = RegInit(false.B)
  val s2_in     = Wire(new LqWriteBundle)
  val s2_out    = Wire(new LqWriteBundle)
  val s2_kill   = Wire(Bool())
  val s2_can_go = s3_ready
  val s2_fire   = s2_valid && !s2_kill && s2_can_go
  val s2_vecActive = RegEnable(s1_out.vecActive, true.B, s1_fire)
  val s2_isvec  = RegEnable(s1_out.isvec, false.B, s1_fire)
  val s2_vec_alignedType = RegEnable(s1_vec_alignedType, s1_fire)

  s2_kill := s2_in.uop.robIdx.needFlush(io.redirect)
  s2_ready := !s2_valid || s2_kill || s3_ready
  when (s1_fire) { s2_valid := true.B }
  .elsewhen (s2_fire) { s2_valid := false.B }
  .elsewhen (s2_kill) { s2_valid := false.B }
  s2_in := RegEnable(s1_out, s1_fire)

  val s2_pmp = WireInit(io.pmp)

  val s2_prf    = s2_in.isPrefetch
  val s2_hw_prf = s2_in.isHWPrefetch

  // exceptions that may cause the load addr to be invalid / illegal
  // if such an exception happens, the inst and its exception info
  // will be force-writebacked to the rob
  val s2_exception_vec = WireInit(s2_in.uop.exceptionVec)
  when (!s2_in.delayedLoadError) {
    s2_exception_vec(loadAccessFault) := (s2_in.uop.exceptionVec(loadAccessFault) || s2_pmp.ld ||
                                         (io.dcache.resp.bits.tag_error && RegNext(io.csrCtrl.cache_error_enable))) && s2_vecActive
  }

  // soft prefetch will not trigger any exception (but ecc error interrupt may
  // be triggered)
  when (!s2_in.delayedLoadError && (s2_prf || s2_in.tlbMiss)) {
    s2_exception_vec := 0.U.asTypeOf(s2_exception_vec.cloneType)
  }
  val s2_exception = ExceptionNO.selectByFu(s2_exception_vec, LduCfg).asUInt.orR && s2_vecActive

  val (s2_fwd_frm_d_chan, s2_fwd_data_frm_d_chan) = io.tl_d_channel.forward(s1_valid && s1_out.forward_tlDchannel, s1_out.mshrid, s1_out.paddr)
  val (s2_fwd_data_valid, s2_fwd_frm_mshr, s2_fwd_data_frm_mshr) = io.forward_mshr.forward()
  val s2_fwd_frm_d_chan_or_mshr = s2_fwd_data_valid && (s2_fwd_frm_d_chan || s2_fwd_frm_mshr)

  // writeback access fault caused by ecc error / bus error
  // * ecc data error is slow to generate, so we will not use it until load stage 3
  // * in load stage 3, an extra signal io.load_error will be used to report the delayed error
  val s2_actually_mmio = s2_pmp.mmio
  val s2_mmio          = !s2_prf &&
                          s2_actually_mmio &&
                         !s2_exception &&
                         !s2_in.tlbMiss

  val s2_full_fwd      = Wire(Bool())
  val s2_mem_amb       = s2_in.uop.storeSetHit &&
                         io.lsq.forward.addrInvalid

  val s2_tlb_miss      = s2_in.tlbMiss
  val s2_fwd_fail      = io.lsq.forward.dataInvalid
  val s2_dcache_miss   = io.dcache.resp.bits.miss &&
                         !s2_fwd_frm_d_chan_or_mshr &&
                         !s2_full_fwd

  val s2_mq_nack       = io.dcache.s2_mq_nack &&
                         !s2_fwd_frm_d_chan_or_mshr &&
                         !s2_full_fwd

  val s2_bank_conflict = io.dcache.s2_bank_conflict &&
                         !s2_fwd_frm_d_chan_or_mshr &&
                         !s2_full_fwd

  val s2_wpu_pred_fail = io.dcache.s2_wpu_pred_fail &&
                         !s2_fwd_frm_d_chan_or_mshr &&
                         !s2_full_fwd

  val s2_rar_nack      = io.lsq.ldld_nuke_query.req.valid &&
                         !io.lsq.ldld_nuke_query.req.ready

  val s2_raw_nack      = io.lsq.stld_nuke_query.req.valid &&
                         !io.lsq.stld_nuke_query.req.ready
  // st-ld violation query
  //  NeedFastRecovery is valid when:
  //  1. the fast recovery query request is valid,
  //  2. the load instruction is younger than the requestors (store instructions),
  //  3. the physical addresses match, and
  //  4. the data masks overlap.
  val s2_nuke_paddr_match = VecInit((0 until StorePipelineWidth).map(w => {Mux(s2_in.isvec && s2_in.is128bit,
    s2_in.paddr(PAddrBits-1, 4) === io.stld_nuke_query(w).bits.paddr(PAddrBits-1, 4),
    s2_in.paddr(PAddrBits-1, 3) === io.stld_nuke_query(w).bits.paddr(PAddrBits-1, 3))}))
  val s2_nuke          = VecInit((0 until StorePipelineWidth).map(w => {
                          io.stld_nuke_query(w).valid && // query valid
                          isAfter(s2_in.uop.robIdx, io.stld_nuke_query(w).bits.robIdx) && // older store
                          s2_nuke_paddr_match(w) && // paddr match
                          (s2_in.mask & io.stld_nuke_query(w).bits.mask).orR // data mask contain
                        })).asUInt.orR && !s2_tlb_miss || s2_in.rep_info.nuke

  val s2_cache_handled   = io.dcache.resp.bits.handled
  val s2_cache_tag_error = RegNext(io.csrCtrl.cache_error_enable) &&
                           io.dcache.resp.bits.tag_error

  val s2_troublem        = !s2_exception &&
                           !s2_mmio &&
                           !s2_prf &&
                           !s2_in.delayedLoadError

  io.dcache.resp.ready  := true.B
  val s2_dcache_should_resp = !(s2_in.tlbMiss || s2_exception || s2_in.delayedLoadError || s2_mmio || s2_prf)
  assert(!(s2_valid && (s2_dcache_should_resp && !io.dcache.resp.valid)), "DCache response got lost")

  // fast replay request
  val s2_dcache_fast_rep = (s2_mq_nack || !s2_dcache_miss && (s2_bank_conflict || s2_wpu_pred_fail))
  val s2_nuke_fast_rep   = !s2_mq_nack &&
                           !s2_dcache_miss &&
                           !s2_bank_conflict &&
                           !s2_wpu_pred_fail &&
                           !s2_rar_nack &&
                           !s2_raw_nack &&
                           s2_nuke

  val s2_fast_rep = !s2_mem_amb &&
                    !s2_tlb_miss &&
                    !s2_fwd_fail &&
                    (s2_dcache_fast_rep || s2_nuke_fast_rep) &&
                    s2_troublem

  // need to allocate a new entry
  val s2_can_query = !s2_mem_amb &&
                     !s2_tlb_miss &&
                     !s2_fwd_fail &&
                     s2_troublem

  val s2_data_fwded = s2_dcache_miss && (s2_full_fwd || s2_cache_tag_error)

  // ld-ld violation query request
  io.lsq.ldld_nuke_query.req.valid           := s2_valid && s2_can_query
  io.lsq.ldld_nuke_query.req.bits.uop        := s2_in.uop
  io.lsq.ldld_nuke_query.req.bits.mask       := s2_in.mask
  io.lsq.ldld_nuke_query.req.bits.paddr      := s2_in.paddr
  io.lsq.ldld_nuke_query.req.bits.data_valid := Mux(s2_full_fwd || s2_fwd_data_valid, true.B, !s2_dcache_miss)

  // st-ld violation query request
  io.lsq.stld_nuke_query.req.valid           := s2_valid && s2_can_query
  io.lsq.stld_nuke_query.req.bits.uop        := s2_in.uop
  io.lsq.stld_nuke_query.req.bits.mask       := s2_in.mask
  io.lsq.stld_nuke_query.req.bits.paddr      := s2_in.paddr
  io.lsq.stld_nuke_query.req.bits.data_valid := Mux(s2_full_fwd || s2_fwd_data_valid, true.B, !s2_dcache_miss)

  // merge forward result
  // lsq has higher priority than sbuffer
  val s2_fwd_mask = Wire(Vec((VLEN/8), Bool()))
  val s2_fwd_data = Wire(Vec((VLEN/8), UInt(8.W)))
  s2_full_fwd := ((~s2_fwd_mask.asUInt).asUInt & s2_in.mask) === 0.U && !io.lsq.forward.dataInvalid
  // generate VLEN/8 Muxes
  for (i <- 0 until VLEN / 8) {
    s2_fwd_mask(i) := io.lsq.forward.forwardMask(i) || io.sbuffer.forwardMask(i)
    s2_fwd_data(i) := Mux(io.lsq.forward.forwardMask(i), io.lsq.forward.forwardData(i), io.sbuffer.forwardData(i))
  }
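  // s2_full_fwd asks: is every byte required by this load covered by forwarded
  // bytes? E.g. with s2_in.mask = b00001111 and s2_fwd_mask = b00111111,
  // (~fwd_mask & mask) = 0, so the load completes entirely from store-to-load
  // forwarding and the dcache data is not needed.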

  XSDebug(s2_fire, "[FWD LOAD RESP] pc %x fwd %x(%b) + %x(%b)\n",
    s2_in.uop.pc,
    io.lsq.forward.forwardData.asUInt, io.lsq.forward.forwardMask.asUInt,
    s2_in.forwardData.asUInt, s2_in.forwardMask.asUInt
  )

  s2_out                     := s2_in
  s2_out.data                := 0.U // data will be generated in load s3
  s2_out.uop.fpWen           := s2_in.uop.fpWen && !s2_exception
  s2_out.mmio                := s2_mmio
  s2_out.uop.flushPipe       := false.B
  s2_out.uop.exceptionVec    := s2_exception_vec
  s2_out.forwardMask         := s2_fwd_mask
  s2_out.forwardData         := s2_fwd_data
  s2_out.handledByMSHR       := s2_cache_handled
  s2_out.miss                := s2_dcache_miss && s2_troublem
  s2_out.feedbacked          := io.feedback_fast.valid

  // Generate replay signal caused by:
  // * st-ld violation check
  // * tlb miss
  // * dcache replay
  // * forward data invalid
  // * dcache miss
  s2_out.rep_info.mem_amb         := s2_mem_amb && s2_troublem
  s2_out.rep_info.tlb_miss        := s2_tlb_miss && s2_troublem
  s2_out.rep_info.fwd_fail        := s2_fwd_fail && s2_troublem
  s2_out.rep_info.dcache_rep      := s2_mq_nack && s2_troublem
  s2_out.rep_info.dcache_miss     := s2_dcache_miss && s2_troublem
  s2_out.rep_info.bank_conflict   := s2_bank_conflict && s2_troublem
  s2_out.rep_info.wpu_fail        := s2_wpu_pred_fail && s2_troublem
  s2_out.rep_info.rar_nack        := s2_rar_nack && s2_troublem
  s2_out.rep_info.raw_nack        := s2_raw_nack && s2_troublem
  s2_out.rep_info.nuke            := s2_nuke && s2_troublem
  s2_out.rep_info.full_fwd        := s2_data_fwded
  s2_out.rep_info.data_inv_sq_idx := io.lsq.forward.dataInvalidSqIdx
  s2_out.rep_info.addr_inv_sq_idx := io.lsq.forward.addrInvalidSqIdx
  s2_out.rep_info.rep_carry       := io.dcache.resp.bits.replayCarry
  s2_out.rep_info.mshr_id         := io.dcache.resp.bits.mshr_id
  s2_out.rep_info.last_beat       := s2_in.paddr(log2Up(refillBytes))
  s2_out.rep_info.debug           := s2_in.uop.debugInfo
  s2_out.rep_info.tlb_id          := io.tlb_hint.id
  s2_out.rep_info.tlb_full        := io.tlb_hint.full
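  // last_beat: assuming the usual two-beat refill (block = 2 x refillBytes), a single
  // paddr bit, paddr(log2Up(refillBytes)), tells which beat carries this load's data,
  // so the replay queue can match the load against the corresponding refill beat.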

  // if forward fails, replay this inst from fetch
  val debug_fwd_fail_rep = s2_fwd_fail && !s2_troublem && !s2_in.tlbMiss
  // if ld-ld violation is detected, replay this inst from fetch
  val debug_ldld_nuke_rep = false.B // s2_ldld_violation && !s2_mmio && !s2_is_prefetch && !s2_in.tlbMiss

  // to be removed
  io.feedback_fast.valid                 := false.B
  io.feedback_fast.bits.hit              := false.B
  io.feedback_fast.bits.flushState       := s2_in.ptwBack
  io.feedback_fast.bits.robIdx           := s2_in.uop.robIdx
  io.feedback_fast.bits.sourceType       := RSFeedbackType.lrqFull
  io.feedback_fast.bits.dataInvalidSqIdx := DontCare

  io.ldCancel.ld1Cancel := false.B

  // fast wakeup
  io.fast_uop.valid := RegNext(
    !io.dcache.s1_disable_fast_wakeup &&
    s1_valid &&
    !s1_kill &&
    !io.tlb.resp.bits.miss &&
    !io.lsq.forward.dataInvalidFast
  ) && (s2_valid && !s2_out.rep_info.need_rep && !s2_mmio) && !s2_isvec
  io.fast_uop.bits := RegNext(s1_out.uop)

  io.s2_ptr_chasing := RegEnable(s1_try_ptr_chasing && !s1_cancel_ptr_chasing, false.B, s1_fire)

  // RegNext prefetch train for better timing
  // ** Now, prefetch train is valid at load s3 **
  io.prefetch_train.valid              := RegNext(s2_valid && !s2_actually_mmio && !s2_in.tlbMiss)
  io.prefetch_train.bits.fromLsPipelineBundle(s2_in, latch = true)
  io.prefetch_train.bits.miss          := RegNext(io.dcache.resp.bits.miss) // TODO: use trace with bank conflict?
  io.prefetch_train.bits.meta_prefetch := RegNext(io.dcache.resp.bits.meta_prefetch)
  io.prefetch_train.bits.meta_access   := RegNext(io.dcache.resp.bits.meta_access)

  io.prefetch_train_l1.valid              := RegNext(s2_valid && !s2_actually_mmio)
  io.prefetch_train_l1.bits.fromLsPipelineBundle(s2_in, latch = true)
  io.prefetch_train_l1.bits.miss          := RegNext(io.dcache.resp.bits.miss)
  io.prefetch_train_l1.bits.meta_prefetch := RegNext(io.dcache.resp.bits.meta_prefetch)
  io.prefetch_train_l1.bits.meta_access   := RegNext(io.dcache.resp.bits.meta_access)
  if (env.FPGAPlatform) {
    io.dcache.s0_pc := DontCare
    io.dcache.s1_pc := DontCare
    io.dcache.s2_pc := DontCare
  } else {
    io.dcache.s0_pc := s0_out.uop.pc
    io.dcache.s1_pc := s1_out.uop.pc
    io.dcache.s2_pc := s2_out.uop.pc
  }
  io.dcache.s2_kill := s2_pmp.ld || s2_actually_mmio || s2_kill
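
  // s2_ld_valid_dup below holds six copies of the same s2 valid bit; each copy
  // later drives a separate write-enable consumer (see data_wen_dup in stage 3),
  // which cuts the fanout of a single valid register for better timing.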
  val s1_ld_left_fire = s1_valid && !s1_kill && s2_ready
  val s2_ld_valid_dup = RegInit(0.U(6.W))
  s2_ld_valid_dup := 0x0.U(6.W)
  when (s1_ld_left_fire && !s1_out.isHWPrefetch) { s2_ld_valid_dup := 0x3f.U(6.W) }
  when (s1_kill || s1_out.isHWPrefetch) { s2_ld_valid_dup := 0x0.U(6.W) }
  assert(RegNext((s2_valid === s2_ld_valid_dup(0)) || RegNext(s1_out.isHWPrefetch)))

  // Pipeline
  // --------------------------------------------------------------------------------
  // stage 3
  // --------------------------------------------------------------------------------
  // writeback and update load queue
  val s3_valid        = RegNext(s2_valid && !s2_out.isHWPrefetch && !s2_out.uop.robIdx.needFlush(io.redirect))
  val s3_in           = RegEnable(s2_out, s2_fire)
  val s3_out          = Wire(Valid(new MemExuOutput))
  val s3_dcache_rep   = RegEnable(s2_dcache_fast_rep && s2_troublem, false.B, s2_fire)
  val s3_ld_valid_dup = RegEnable(s2_ld_valid_dup, s2_fire)
  val s3_fast_rep     = Wire(Bool())
  val s3_troublem     = RegNext(s2_troublem)
  val s3_kill         = s3_in.uop.robIdx.needFlush(io.redirect)
  val s3_vecout       = Wire(new OnlyVecExuOutput)
  val s3_vecActive    = RegEnable(s2_out.vecActive, true.B, s2_fire)
  val s3_isvec        = RegEnable(s2_out.isvec, false.B, s2_fire)
  val s3_vec_alignedType = RegEnable(s2_vec_alignedType, s2_fire)
  val s3_mmio         = Wire(chiselTypeOf(io.lsq.uncache))
  // TODO: Fix vector load merge buffer nack
  val s3_vec_mb_nack  = Wire(Bool())
  s3_vec_mb_nack     := false.B
  XSError(s3_valid && s3_vec_mb_nack, "Merge buffer should always accept vector loads!")

  s3_ready := !s3_valid || s3_kill || io.ldout.ready
  s3_mmio.valid := RegNextN(io.lsq.uncache.valid, 3, Some(false.B))
  s3_mmio.ready := RegNextN(io.lsq.uncache.ready, 3, Some(false.B))
  s3_mmio.bits  := RegNextN(io.lsq.uncache.bits, 3)

  // forward last beat
  val (s3_fwd_frm_d_chan, s3_fwd_data_frm_d_chan) = io.tl_d_channel.forward(s2_valid && s2_out.forward_tlDchannel, s2_out.mshrid, s2_out.paddr)
  val s3_fwd_data_valid = RegEnable(s2_fwd_data_valid, false.B, s2_valid)
  val s3_fwd_frm_d_chan_valid = (s3_fwd_frm_d_chan && s3_fwd_data_valid && s3_in.handledByMSHR)
  val s3_fast_rep_canceled = io.replay.valid && io.replay.bits.forward_tlDchannel || !io.dcache.req.ready

  // s3 load fast replay
  io.fast_rep_out.valid := s3_valid && s3_fast_rep && !s3_in.uop.robIdx.needFlush(io.redirect)
  io.fast_rep_out.bits := s3_in

  io.lsq.ldin.valid := s3_valid && (!s3_fast_rep || s3_fast_rep_canceled) && !s3_in.feedbacked
  // TODO: check this --by hx
  // io.lsq.ldin.valid := s3_valid && (!s3_fast_rep || !io.fast_rep_out.ready) && !s3_in.feedbacked && !s3_in.lateKill
  io.lsq.ldin.bits := s3_in
  io.lsq.ldin.bits.miss := s3_in.miss && !s3_fwd_frm_d_chan_valid

  /* <------- DANGEROUS: Don't change sequence here ! -------> */
  io.lsq.ldin.bits.data_wen_dup := s3_ld_valid_dup.asBools
  io.lsq.ldin.bits.replacementUpdated := io.dcache.resp.bits.replacementUpdated
  io.lsq.ldin.bits.missDbUpdated := RegNext(s2_fire && s2_in.hasROBEntry && !s2_in.tlbMiss && !s2_in.missDbUpdated)

  val s3_dly_ld_err =
    if (EnableAccurateLoadError) {
      io.dcache.resp.bits.error_delayed && RegNext(io.csrCtrl.cache_error_enable) && s3_troublem
    } else {
      WireInit(false.B)
    }
  io.s3_dly_ld_err := false.B // s3_dly_ld_err && s3_valid
  io.lsq.ldin.bits.dcacheRequireReplay  := s3_dcache_rep
  io.fast_rep_out.bits.delayedLoadError := s3_dly_ld_err

  val s3_vp_match_fail = RegNext(io.lsq.forward.matchInvalid || io.sbuffer.matchInvalid) && s3_troublem
  val s3_rep_frm_fetch = s3_vp_match_fail
  val s3_ldld_rep_inst =
      io.lsq.ldld_nuke_query.resp.valid &&
      io.lsq.ldld_nuke_query.resp.bits.rep_frm_fetch &&
      RegNext(io.csrCtrl.ldld_vio_check_enable)
  val s3_flushPipe = s3_ldld_rep_inst
1194
1195  val s3_rep_info = WireInit(s3_in.rep_info)
1196  s3_rep_info.dcache_miss   := s3_in.rep_info.dcache_miss && !s3_fwd_frm_d_chan_valid
1197  val s3_sel_rep_cause = PriorityEncoderOH(s3_rep_info.cause.asUInt)
1198
1199  val s3_exception = ExceptionNO.selectByFu(s3_in.uop.exceptionVec, LduCfg).asUInt.orR && s3_vecActive
1200  when (s3_exception || s3_dly_ld_err || s3_rep_frm_fetch) {
1201    io.lsq.ldin.bits.rep_info.cause := 0.U.asTypeOf(s3_rep_info.cause.cloneType)
1202  } .otherwise {
1203    io.lsq.ldin.bits.rep_info.cause := VecInit(s3_sel_rep_cause.asBools)
1204  }
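  // Illustrative example (assuming the cause indices follow the C_MA < C_TM <
  // ... < C_NK priority order): if tlb_miss and dcache_miss are both set,
  // PriorityEncoderOH keeps only the lowest-indexed bit, so the load replays for
  // the TLB miss first and re-evaluates the remaining causes on the next pass;
  // exceptions and replay-from-fetch clear the cause vector entirely.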
1205
1206  // Int load: on a hit, it is written back at s3
1207  s3_out.valid                := s3_valid && !io.lsq.ldin.bits.rep_info.need_rep && !s3_in.mmio
1208  s3_out.bits.uop             := s3_in.uop
1209  s3_out.bits.uop.exceptionVec(loadAccessFault) := (s3_dly_ld_err || s3_in.uop.exceptionVec(loadAccessFault)) && s3_vecActive
1210  s3_out.bits.uop.flushPipe   := false.B
1211  s3_out.bits.uop.replayInst  := s3_rep_frm_fetch || s3_flushPipe
1212  s3_out.bits.data            := s3_in.data
1213  s3_out.bits.debug.isMMIO    := s3_in.mmio
1214  s3_out.bits.debug.isPerfCnt := false.B
1215  s3_out.bits.debug.paddr     := s3_in.paddr
1216  s3_out.bits.debug.vaddr     := s3_in.vaddr
1217
1218  // Vector load, writeback to merge buffer
1219  // TODO: Add assertion in merge buffer, merge buffer must accept vec load writeback
1220  s3_vecout.isvec             := s3_isvec
1221  s3_vecout.vecdata           := 0.U // Data will be assigned later
1222  s3_vecout.mask              := s3_in.mask
1223  // s3_vecout.rob_idx_valid     := s3_in.rob_idx_valid
1224  // s3_vecout.inner_idx         := s3_in.inner_idx
1225  // s3_vecout.rob_idx           := s3_in.rob_idx
1226  // s3_vecout.offset            := s3_in.offset
1227  s3_vecout.reg_offset        := s3_in.reg_offset
1228  s3_vecout.vecActive         := s3_vecActive
1229  s3_vecout.is_first_ele      := s3_in.is_first_ele
1230  // s3_vecout.uopQueuePtr       := DontCare // uopQueuePtr is already saved in flow queue
1231  // s3_vecout.flowPtr           := s3_in.flowPtr
1232  s3_vecout.elemIdx           := DontCare // elemIdx is already saved in flow queue
1233  s3_vecout.elemIdxInsideVd   := DontCare
1234
1235  io.rollback.valid := s3_valid && (s3_rep_frm_fetch || s3_flushPipe) && !s3_exception
1236  io.rollback.bits             := DontCare
1237  io.rollback.bits.isRVC       := s3_out.bits.uop.preDecodeInfo.isRVC
1238  io.rollback.bits.robIdx      := s3_out.bits.uop.robIdx
1239  io.rollback.bits.ftqIdx      := s3_out.bits.uop.ftqPtr
1240  io.rollback.bits.ftqOffset   := s3_out.bits.uop.ftqOffset
1241  io.rollback.bits.level       := Mux(s3_rep_frm_fetch, RedirectLevel.flush, RedirectLevel.flushAfter)
1242  io.rollback.bits.cfiUpdate.target := s3_out.bits.uop.pc
1243  io.rollback.bits.debug_runahead_checkpoint_id := s3_out.bits.uop.debugInfo.runahead_checkpoint_id
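  // RedirectLevel semantics: flush squashes the load itself (needed when its
  // forwarded data turned out to be wrong, i.e. s3_rep_frm_fetch), whereas
  // flushAfter keeps this load and squashes only the younger instructions.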
1244  /* <------- DANGEROUS: Don't change sequence here ! -------> */
1245
1246  io.lsq.ldin.bits.uop := s3_out.bits.uop
1247
1248  val s3_revoke = s3_exception || io.lsq.ldin.bits.rep_info.need_rep
1249  io.lsq.ldld_nuke_query.revoke := s3_revoke
1250  io.lsq.stld_nuke_query.revoke := s3_revoke
1251
1252  s3_fast_rep := RegNext(s2_fast_rep)
1253
1254  // feedback slow
1255  val s3_fb_no_waiting = !s3_in.isLoadReplay &&
1256                         !(s3_fast_rep && !s3_fast_rep_canceled) &&
1257                         !s3_in.feedbacked
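  // Feedback is only needed while the RS is still tracking this load: suppress it
  // for load-queue replays, for loads taking an (uncanceled) fast replay, and for
  // loads that already sent feedback in an earlier pass.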
1258
1259  // feedback: a scalar load sends feedback to the RS;
1260  //           a vector load signals the VL Merge Buffer, which then sends feedback at uop granularity
1261  io.feedback_slow.valid                 := s3_valid && s3_fb_no_waiting && !s3_isvec
1262  io.feedback_slow.bits.hit              := !s3_rep_info.need_rep || io.lsq.ldin.ready
1263  io.feedback_slow.bits.flushState       := s3_in.ptwBack
1264  io.feedback_slow.bits.robIdx           := s3_in.uop.robIdx
1265  io.feedback_slow.bits.sourceType       := RSFeedbackType.lrqFull
1266  io.feedback_slow.bits.dataInvalidSqIdx := DontCare
1267
1268  io.ldCancel.ld2Cancel := s3_valid && (
1269    io.lsq.ldin.bits.rep_info.need_rep ||                       // exe fail or
1270    s3_in.mmio                                                  // is mmio
1271  )
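  // ld2Cancel revokes the speculative wakeup of this load's consumers: the data
  // is not produced this cycle either because the load must replay or because it
  // is MMIO and completes much later through the uncache path.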
1272
1273  val s3_ld_wb_meta = Mux(s3_valid, s3_out.bits, s3_mmio.bits)
1274
1275  // data from the load queue (uncache access)
1276  val s3_ld_raw_data_frm_uncache = RegNextN(io.lsq.ld_raw_data, 3)
1277  val s3_merged_data_frm_uncache = s3_ld_raw_data_frm_uncache.mergedData()
1278  val s3_picked_data_frm_uncache = LookupTree(s3_ld_raw_data_frm_uncache.addrOffset, List(
1279    "b000".U -> s3_merged_data_frm_uncache(63,  0),
1280    "b001".U -> s3_merged_data_frm_uncache(63,  8),
1281    "b010".U -> s3_merged_data_frm_uncache(63, 16),
1282    "b011".U -> s3_merged_data_frm_uncache(63, 24),
1283    "b100".U -> s3_merged_data_frm_uncache(63, 32),
1284    "b101".U -> s3_merged_data_frm_uncache(63, 40),
1285    "b110".U -> s3_merged_data_frm_uncache(63, 48),
1286    "b111".U -> s3_merged_data_frm_uncache(63, 56)
1287  ))
1288  val s3_ld_data_frm_uncache = rdataHelper(s3_ld_raw_data_frm_uncache.uop, s3_picked_data_frm_uncache)
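  // The LookupTree above is a byte shift: for addrOffset k it selects
  // mergedData(63, 8*k), zero-extended back to 64 bits by the mux, so rdataHelper
  // always sees the addressed byte at bit 0. An equivalent sketch:
  //   (s3_merged_data_frm_uncache >> (s3_ld_raw_data_frm_uncache.addrOffset << 3))(63, 0)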
1289
1290  // data from dcache hit
1291  val s3_ld_raw_data_frm_cache = Wire(new LoadDataFromDcacheBundle)
1292  s3_ld_raw_data_frm_cache.respDcacheData       := io.dcache.resp.bits.data_delayed
1293  s3_ld_raw_data_frm_cache.forwardMask          := RegEnable(s2_fwd_mask, s2_valid)
1294  s3_ld_raw_data_frm_cache.forwardData          := RegEnable(s2_fwd_data, s2_valid)
1295  s3_ld_raw_data_frm_cache.uop                  := RegEnable(s2_out.uop, s2_valid)
1296  s3_ld_raw_data_frm_cache.addrOffset           := RegEnable(s2_out.paddr(3, 0), s2_valid)
1297  s3_ld_raw_data_frm_cache.forward_D            := RegEnable(s2_fwd_frm_d_chan, false.B, s2_valid) || s3_fwd_frm_d_chan_valid
1298  s3_ld_raw_data_frm_cache.forwardData_D        := Mux(s3_fwd_frm_d_chan_valid, s3_fwd_data_frm_d_chan, RegEnable(s2_fwd_data_frm_d_chan, s2_valid))
1299  s3_ld_raw_data_frm_cache.forward_mshr         := RegEnable(s2_fwd_frm_mshr, false.B, s2_valid)
1300  s3_ld_raw_data_frm_cache.forwardData_mshr     := RegEnable(s2_fwd_data_frm_mshr, s2_valid)
1301  s3_ld_raw_data_frm_cache.forward_result_valid := RegEnable(s2_fwd_data_valid, false.B, s2_valid)
1302
1303  val s3_merged_data_frm_cache = s3_ld_raw_data_frm_cache.mergedData()
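  // A rough per-byte sketch of what mergedData() does, based on the fields wired
  // above (refill data is selected first, then store-forwarded bytes override it):
  //   val refill = Mux(forward_D, forwardData_D,
  //                Mux(forward_mshr, forwardData_mshr, respDcacheData))
  //   // byte i: merged(i) = Mux(forwardMask(i), forwardData(i), refill(8*i+7, 8*i))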
1304  val s3_picked_data_frm_cache = LookupTree(s3_ld_raw_data_frm_cache.addrOffset, List(
1305    "b0000".U -> s3_merged_data_frm_cache(63,    0),
1306    "b0001".U -> s3_merged_data_frm_cache(63,    8),
1307    "b0010".U -> s3_merged_data_frm_cache(63,   16),
1308    "b0011".U -> s3_merged_data_frm_cache(63,   24),
1309    "b0100".U -> s3_merged_data_frm_cache(63,   32),
1310    "b0101".U -> s3_merged_data_frm_cache(63,   40),
1311    "b0110".U -> s3_merged_data_frm_cache(63,   48),
1312    "b0111".U -> s3_merged_data_frm_cache(63,   56),
1313    "b1000".U -> s3_merged_data_frm_cache(127,  64),
1314    "b1001".U -> s3_merged_data_frm_cache(127,  72),
1315    "b1010".U -> s3_merged_data_frm_cache(127,  80),
1316    "b1011".U -> s3_merged_data_frm_cache(127,  88),
1317    "b1100".U -> s3_merged_data_frm_cache(127,  96),
1318    "b1101".U -> s3_merged_data_frm_cache(127, 104),
1319    "b1110".U -> s3_merged_data_frm_cache(127, 112),
1320    "b1111".U -> s3_merged_data_frm_cache(127, 120)
1321  ))
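  // Same byte-shift idea as the uncache path, but over a full 128-bit beat: the
  // 4-bit addrOffset selects the start byte, and offsets >= 8 ("b1000" and up)
  // land in the upper 64-bit half of the beat.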
1322  val s3_ld_data_frm_cache = rdataHelper(s3_ld_raw_data_frm_cache.uop, s3_picked_data_frm_cache)
1323
1324  // FIXME: add 1 cycle delay ?
1325  // io.lsq.uncache.ready := !s3_valid
1326  io.ldout.bits        := s3_ld_wb_meta
1327  io.ldout.bits.data   := Mux(s3_valid, s3_ld_data_frm_cache, s3_ld_data_frm_uncache)
1328  io.ldout.valid       := (s3_out.valid || (s3_mmio.valid && !s3_valid)) && !s3_vecout.isvec
1329
1330  // TODO: check this --hx
1331  // io.ldout.valid       := s3_out.valid && !s3_out.bits.uop.robIdx.needFlush(io.redirect) && !s3_vecout.isvec ||
1332  //   io.lsq.uncache.valid && !io.lsq.uncache.bits.uop.robIdx.needFlush(io.redirect) && !s3_out.valid && !io.lsq.uncache.bits.isVls
1333  //  io.ldout.bits.data   := Mux(s3_out.valid, s3_ld_data_frm_cache, s3_ld_data_frm_uncache)
1334  //  io.ldout.valid       := s3_out.valid && !s3_out.bits.uop.robIdx.needFlush(io.redirect) ||
1335  //                         s3_mmio.valid && !s3_mmio.bits.uop.robIdx.needFlush(io.redirect) && !s3_out.valid
1336
1337  // s3 load fast replay (Chisel last-connect: overrides the earlier assignment above and drops its redirect check)
1338  io.fast_rep_out.valid := s3_valid && s3_fast_rep
1339  io.fast_rep_out.bits := s3_in
1340  io.fast_rep_out.bits.lateKill := s3_rep_frm_fetch
1341
1342  val vecFeedback = s3_valid && s3_fb_no_waiting && s3_rep_info.need_rep && !io.lsq.ldin.ready && s3_isvec
1343
1344  // vector output
1345  io.vecldout.bits.vec := s3_vecout
1346  // FIXME
1347  io.vecldout.bits.isPackage := DontCare
1348  io.vecldout.bits.packageNum := DontCare
1349  io.vecldout.bits.originAlignedType := DontCare
1350  io.vecldout.bits.alignedType := s3_vec_alignedType
1351  // vec feedback
1352  io.vecldout.bits.vecFeedback := vecFeedback
1353  // TODO: VLSU, uncache data logic
1354  val vecdata = rdataVecHelper(s3_vec_alignedType(1,0), s3_picked_data_frm_cache)
1355  io.vecldout.bits.vec.vecdata := Mux(s3_in.is128bit, s3_merged_data_frm_cache, vecdata)
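  // A 128-bit (whole-beat) vector access takes the merged beat as-is; narrower
  // element accesses go through rdataVecHelper, which extracts the element
  // according to alignedType (the element-width encoding).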
1356  io.vecldout.bits.data := 0.U
1357  // io.vecldout.bits.fflags := s3_out.bits.fflags
1358  // io.vecldout.bits.redirectValid := s3_out.bits.redirectValid
1359  // io.vecldout.bits.redirect := s3_out.bits.redirect
1360  io.vecldout.bits.debug := s3_out.bits.debug
1361  io.vecldout.bits.uop := s3_out.bits.uop
1362  // TODO: check this, why !io.lsq.uncache.bits.isVls before?
1363  io.vecldout.valid := s3_out.valid && !s3_out.bits.uop.robIdx.needFlush(io.redirect) && s3_vecout.isvec ||
1364    io.lsq.uncache.valid && !io.lsq.uncache.bits.uop.robIdx.needFlush(io.redirect) && !s3_out.valid && io.lsq.uncache.bits.isVls
1365    // previously: io.lsq.uncache.valid && !io.lsq.uncache.bits.uop.robIdx.needFlush(io.redirect) && !s3_out.valid && !io.lsq.uncache.bits.isVls
1366
1367  // fast load to load forward
1368  if (EnableLoadToLoadForward) {
1369    io.l2l_fwd_out.valid      := s3_valid && !s3_in.mmio && !s3_rep_info.need_rep
1370    io.l2l_fwd_out.data       := Mux(s3_in.vaddr(3), s3_merged_data_frm_cache(127, 64), s3_merged_data_frm_cache(63, 0))
1371    io.l2l_fwd_out.dly_ld_err := s3_dly_ld_err || // ecc delayed error
1372                                 s3_ldld_rep_inst ||
1373                                 s3_rep_frm_fetch
1374  } else {
1375    io.l2l_fwd_out.valid := false.B
1376    io.l2l_fwd_out.data := DontCare
1377    io.l2l_fwd_out.dly_ld_err := DontCare
1378  }
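  // Forwarding note: vaddr(3) picks the 64-bit half of the 16-byte beat holding
  // the load address, presumably so the pointer-chasing consumer can start from
  // the raw (unshifted) double word; forwarding is suppressed for MMIO accesses
  // and for any load that needs replay.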
1379
1380  // trigger
1381  val last_valid_data = RegNext(RegEnable(io.ldout.bits.data, io.ldout.fire))
1382  val hit_ld_addr_trig_hit_vec = Wire(Vec(TriggerNum, Bool()))
1383  val lq_ld_addr_trig_hit_vec = io.lsq.trigger.lqLoadAddrTriggerHitVec
1384  (0 until TriggerNum).foreach { i =>
1385    val tdata2    = RegNext(io.trigger(i).tdata2)
1386    val matchType = RegNext(io.trigger(i).matchType)
1387    val tEnable   = RegNext(io.trigger(i).tEnable)
1388
1389    hit_ld_addr_trig_hit_vec(i) := TriggerCmp(RegNext(s2_out.vaddr), tdata2, matchType, tEnable)
1390    io.trigger(i).addrHit       := Mux(s3_out.valid, hit_ld_addr_trig_hit_vec(i), lq_ld_addr_trig_hit_vec(i))
1391  }
1392  io.lsq.trigger.hitLoadAddrTriggerHitVec := hit_ld_addr_trig_hit_vec
1393
1394  // FIXME: please move this part to LoadQueueReplay
1395  io.debug_ls := DontCare
1396
1397  // Topdown
1398  io.lsTopdownInfo.s1.robIdx          := s1_in.uop.robIdx.value
1399  io.lsTopdownInfo.s1.vaddr_valid     := s1_valid && s1_in.hasROBEntry
1400  io.lsTopdownInfo.s1.vaddr_bits      := s1_vaddr
1401  io.lsTopdownInfo.s2.robIdx          := s2_in.uop.robIdx.value
1402  io.lsTopdownInfo.s2.paddr_valid     := s2_fire && s2_in.hasROBEntry && !s2_in.tlbMiss
1403  io.lsTopdownInfo.s2.paddr_bits      := s2_in.paddr
1404  io.lsTopdownInfo.s2.first_real_miss := io.dcache.resp.bits.real_miss
1405  io.lsTopdownInfo.s2.cache_miss_en   := s2_fire && s2_in.hasROBEntry && !s2_in.tlbMiss && !s2_in.missDbUpdated
1406
1407  // perf cnt
1408  XSPerfAccumulate("s0_in_valid",                  io.ldin.valid)
1409  XSPerfAccumulate("s0_in_block",                  io.ldin.valid && !io.ldin.fire)
1410  XSPerfAccumulate("s0_in_fire_first_issue",       s0_valid && s0_sel_src.isFirstIssue)
1411  XSPerfAccumulate("s0_lsq_fire_first_issue",      io.replay.fire)
1412  XSPerfAccumulate("s0_ldu_fire_first_issue",      io.ldin.fire && s0_sel_src.isFirstIssue)
1413  XSPerfAccumulate("s0_fast_replay_issue",         io.fast_rep_in.fire)
1414  XSPerfAccumulate("s0_stall_out",                 s0_valid && !s0_can_go)
1415  XSPerfAccumulate("s0_stall_dcache",              s0_valid && !io.dcache.req.ready)
1416  XSPerfAccumulate("s0_addr_spec_success",         s0_fire && s0_sel_src.vaddr(VAddrBits-1, 12) === io.ldin.bits.src(0)(VAddrBits-1, 12))
1417  XSPerfAccumulate("s0_addr_spec_failed",          s0_fire && s0_sel_src.vaddr(VAddrBits-1, 12) =/= io.ldin.bits.src(0)(VAddrBits-1, 12))
1418  XSPerfAccumulate("s0_addr_spec_success_once",    s0_fire && s0_sel_src.vaddr(VAddrBits-1, 12) === io.ldin.bits.src(0)(VAddrBits-1, 12) && s0_sel_src.isFirstIssue)
1419  XSPerfAccumulate("s0_addr_spec_failed_once",     s0_fire && s0_sel_src.vaddr(VAddrBits-1, 12) =/= io.ldin.bits.src(0)(VAddrBits-1, 12) && s0_sel_src.isFirstIssue)
1420  XSPerfAccumulate("s0_forward_tl_d_channel",      s0_out.forward_tlDchannel)
1421  XSPerfAccumulate("s0_hardware_prefetch_fire",    s0_fire && s0_hw_prf_select)
1422  XSPerfAccumulate("s0_software_prefetch_fire",    s0_fire && s0_sel_src.prf && s0_int_iss_select)
1423  XSPerfAccumulate("s0_hardware_prefetch_blocked", io.prefetch_req.valid && !s0_hw_prf_select)
1424  XSPerfAccumulate("s0_hardware_prefetch_total",   io.prefetch_req.valid)
1425
1426  XSPerfAccumulate("s1_in_valid",                  s1_valid)
1427  XSPerfAccumulate("s1_in_fire",                   s1_fire)
1428  XSPerfAccumulate("s1_in_fire_first_issue",       s1_fire && s1_in.isFirstIssue)
1429  XSPerfAccumulate("s1_tlb_miss",                  s1_fire && s1_tlb_miss)
1430  XSPerfAccumulate("s1_tlb_miss_first_issue",      s1_fire && s1_tlb_miss && s1_in.isFirstIssue)
1431  XSPerfAccumulate("s1_stall_out",                 s1_valid && !s1_can_go)
1432  XSPerfAccumulate("s1_dly_err",                   s1_valid && s1_fast_rep_dly_err)
1433
1434  XSPerfAccumulate("s2_in_valid",                  s2_valid)
1435  XSPerfAccumulate("s2_in_fire",                   s2_fire)
1436  XSPerfAccumulate("s2_in_fire_first_issue",       s2_fire && s2_in.isFirstIssue)
1437  XSPerfAccumulate("s2_dcache_miss",               s2_fire && io.dcache.resp.bits.miss)
1438  XSPerfAccumulate("s2_dcache_miss_first_issue",   s2_fire && io.dcache.resp.bits.miss && s2_in.isFirstIssue)
1439  XSPerfAccumulate("s2_dcache_real_miss_first_issue",   s2_fire && io.dcache.resp.bits.miss && s2_in.isFirstIssue)
1440  XSPerfAccumulate("s2_full_forward",              s2_fire && s2_full_fwd)
1441  XSPerfAccumulate("s2_dcache_miss_full_forward",  s2_fire && s2_dcache_miss)
1442  XSPerfAccumulate("s2_fwd_frm_d_can",             s2_valid && s2_fwd_frm_d_chan)
1443  XSPerfAccumulate("s2_fwd_frm_d_chan_or_mshr",    s2_valid && s2_fwd_frm_d_chan_or_mshr)
1444  XSPerfAccumulate("s2_stall_out",                 s2_fire && !s2_can_go)
1445  XSPerfAccumulate("s2_prefetch",                  s2_fire && s2_prf)
1446  XSPerfAccumulate("s2_prefetch_ignored",          s2_fire && s2_prf && s2_mq_nack) // ignore prefetch for mshr full / miss req port conflict
1447  XSPerfAccumulate("s2_prefetch_miss",             s2_fire && s2_prf && io.dcache.resp.bits.miss) // prefetch req miss in l1
1448  XSPerfAccumulate("s2_prefetch_hit",              s2_fire && s2_prf && !io.dcache.resp.bits.miss) // prefetch req hit in l1
1449  XSPerfAccumulate("s2_prefetch_accept",           s2_fire && s2_prf && io.dcache.resp.bits.miss && !s2_mq_nack) // prefetch a missed line in l1, and l1 accepted it
1450  XSPerfAccumulate("s2_forward_req",               s2_fire && s2_in.forward_tlDchannel)
1451  XSPerfAccumulate("s2_successfully_forward_channel_D", s2_fire && s2_fwd_frm_d_chan && s2_fwd_data_valid)
1452  XSPerfAccumulate("s2_successfully_forward_mshr",      s2_fire && s2_fwd_frm_mshr && s2_fwd_data_valid)
1453
1454  XSPerfAccumulate("s3_fwd_frm_d_chan",            s3_valid && s3_fwd_frm_d_chan_valid)
1455
1456  XSPerfAccumulate("load_to_load_forward",                      s1_try_ptr_chasing && !s1_ptr_chasing_canceled)
1457  XSPerfAccumulate("load_to_load_forward_try",                  s1_try_ptr_chasing)
1458  XSPerfAccumulate("load_to_load_forward_fail",                 s1_cancel_ptr_chasing)
1459  XSPerfAccumulate("load_to_load_forward_fail_cancelled",       s1_cancel_ptr_chasing && s1_ptr_chasing_canceled)
1460  XSPerfAccumulate("load_to_load_forward_fail_wakeup_mismatch", s1_cancel_ptr_chasing && !s1_ptr_chasing_canceled && s1_not_fast_match)
1461  XSPerfAccumulate("load_to_load_forward_fail_op_not_ld",       s1_cancel_ptr_chasing && !s1_ptr_chasing_canceled && !s1_not_fast_match && s1_fu_op_type_not_ld)
1462  XSPerfAccumulate("load_to_load_forward_fail_addr_align",      s1_cancel_ptr_chasing && !s1_ptr_chasing_canceled && !s1_not_fast_match && !s1_fu_op_type_not_ld && s1_addr_misaligned)
1463  XSPerfAccumulate("load_to_load_forward_fail_set_mismatch",    s1_cancel_ptr_chasing && !s1_ptr_chasing_canceled && !s1_not_fast_match && !s1_fu_op_type_not_ld && !s1_addr_misaligned && s1_addr_mismatch)
1464
1465  // FIXME(lyq): some signals in perfEvents no longer match the current MemBlock design
1466  // hardware performance counter
1467  val perfEvents = Seq(
1468    ("load_s0_in_fire         ", s0_fire                                                        ),
1469    ("load_to_load_forward    ", s1_fire && s1_try_ptr_chasing && !s1_ptr_chasing_canceled      ),
1470    ("stall_dcache            ", s0_valid && s0_can_go && !io.dcache.req.ready                  ),
1471    ("load_s1_in_fire         ", s0_fire                                                        ),
1472    ("load_s1_tlb_miss        ", s1_fire && io.tlb.resp.bits.miss                               ),
1473    ("load_s2_in_fire         ", s1_fire                                                        ),
1474    ("load_s2_dcache_miss     ", s2_fire && io.dcache.resp.bits.miss                            ),
1475  )
1476  generatePerfEvent()
1477
1478  when(io.ldout.fire){
1479    XSDebug("ldout %x\n", io.ldout.bits.uop.pc)
1480  }
1481  // end
1482}