xref: /XiangShan/src/main/scala/xiangshan/mem/pipeline/LoadUnit.scala (revision 5da19fb3f5e30e8e3654dcd8ba1fefc3f257bb3a)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import xiangshan.ExceptionNO._
import xiangshan._
import xiangshan.backend.fu.PMPRespBundle
import xiangshan.cache._
import xiangshan.cache.mmu.{TlbCmd, TlbReq, TlbRequestIO, TlbResp}

class LoadToLsqIO(implicit p: Parameters) extends XSBundle {
  val loadIn = ValidIO(new LqWriteBundle)
  val loadPaddrIn = ValidIO(new LqPaddrWriteBundle)
  val ldout = Flipped(DecoupledIO(new ExuOutput))
  val ldRawData = Input(new LoadDataFromLQBundle)
  val s2_load_data_forwarded = Output(Bool())
  val s3_delayed_load_error = Output(Bool())
  val s2_dcache_require_replay = Output(Bool())
  val s3_replay_from_fetch = Output(Bool()) // update uop.ctrl.replayInst in load queue in s3
  val forward = new PipeLoadForwardQueryIO
  val loadViolationQuery = new LoadViolationQueryIO
  val trigger = Flipped(new LqTriggerIO)
}

class LoadToLoadIO(implicit p: Parameters) extends XSBundle {
  // the load-to-load fast path is limited to a 64-bit ld whose result is used as vaddr src1 only
  val data = UInt(XLEN.W)
  val valid = Bool()
}

class LoadUnitTriggerIO(implicit p: Parameters) extends XSBundle {
  val tdata2 = Input(UInt(64.W))
  val matchType = Input(UInt(2.W))
  val tEnable = Input(Bool()) // timing is calculated before this
  val addrHit = Output(Bool())
  val lastDataHit = Output(Bool())
}

// Load Pipeline Stage 0
// Generate addr, use addr to query DCache and DTLB
class LoadUnit_S0(implicit p: Parameters) extends XSModule with HasDCacheParameters {
  val io = IO(new Bundle() {
    val in = Flipped(Decoupled(new ExuInput))
    val out = Decoupled(new LsPipelineBundle)
    val dtlbReq = DecoupledIO(new TlbReq)
    val dcacheReq = DecoupledIO(new DCacheWordReq)
    val rsIdx = Input(UInt(log2Up(IssQueSize).W))
    val isFirstIssue = Input(Bool())
    val fastpath = Input(new LoadToLoadIO)
    val s0_kill = Input(Bool())
  })
  require(LoadPipelineWidth == exuParameters.LduCnt)

  val imm12 = io.in.bits.uop.ctrl.imm(11, 0)
  val s0_vaddr = WireInit(io.in.bits.src(0) + SignExt(imm12, VAddrBits))
  val s0_mask = WireInit(genWmask(s0_vaddr, io.in.bits.uop.ctrl.fuOpType(1,0)))
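  // Illustrative note (assuming the usual genWmask definition in utils): the mask is
  // byte-granular within the 64-bit lane, i.e. a per-size base mask
  // ("b00" -> 0x1, "b01" -> 0x3, "b10" -> 0xf, "b11" -> 0xff) shifted left by
  // vaddr(2, 0); e.g. a word access at vaddr(2, 0) = 4 yields "b11110000".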
  val s0_uop = WireInit(io.in.bits.uop)

  if (EnableLoadToLoadForward) {
    // When there's no valid instruction from RS, we try the load-to-load forwarding.
    when (!io.in.valid) {
      s0_vaddr := io.fastpath.data
      // Assume the pointer chasing is always ld.
      s0_uop.ctrl.fuOpType := LSUOpType.ld
      s0_mask := genWmask(0.U, LSUOpType.ld)
    }
  }
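  // Illustrative example: for a linked-list walk such as `ld a0, 0(a0)`, the previous
  // load's result arrives through io.fastpath and is used directly as the next load's
  // address, skipping the round trip through the issue queue. Whether this guess was
  // legal is checked in s1 (see cancelPointerChasing in LoadUnit below).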

  val isSoftPrefetch = LSUOpType.isPrefetch(s0_uop.ctrl.fuOpType)
  val isSoftPrefetchRead = s0_uop.ctrl.fuOpType === LSUOpType.prefetch_r
  val isSoftPrefetchWrite = s0_uop.ctrl.fuOpType === LSUOpType.prefetch_w

  // query DTLB
  io.dtlbReq.valid := io.in.valid || io.fastpath.valid
  io.dtlbReq.bits.vaddr := s0_vaddr
  io.dtlbReq.bits.cmd := TlbCmd.read
  io.dtlbReq.bits.size := LSUOpType.size(s0_uop.ctrl.fuOpType)
  io.dtlbReq.bits.kill := DontCare
  io.dtlbReq.bits.debug.robIdx := s0_uop.robIdx
  io.dtlbReq.bits.debug.pc := s0_uop.cf.pc
  io.dtlbReq.bits.debug.isFirstIssue := io.isFirstIssue

  // query DCache
  io.dcacheReq.valid := io.in.valid || io.fastpath.valid
  when (isSoftPrefetchRead) {
    io.dcacheReq.bits.cmd  := MemoryOpConstants.M_PFR
  }.elsewhen (isSoftPrefetchWrite) {
    io.dcacheReq.bits.cmd  := MemoryOpConstants.M_PFW
  }.otherwise {
    io.dcacheReq.bits.cmd  := MemoryOpConstants.M_XRD
  }
  io.dcacheReq.bits.addr := s0_vaddr
  io.dcacheReq.bits.mask := s0_mask
  io.dcacheReq.bits.data := DontCare
  when(isSoftPrefetch) {
    io.dcacheReq.bits.instrtype := SOFT_PREFETCH.U
  }.otherwise {
    io.dcacheReq.bits.instrtype := LOAD_SOURCE.U
  }

  // TODO: update cache meta
  io.dcacheReq.bits.id   := DontCare

  val addrAligned = LookupTree(s0_uop.ctrl.fuOpType(1, 0), List(
    "b00".U   -> true.B,                   //b
    "b01".U   -> (s0_vaddr(0)    === 0.U), //h
    "b10".U   -> (s0_vaddr(1, 0) === 0.U), //w
    "b11".U   -> (s0_vaddr(2, 0) === 0.U)  //d
  ))
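  // e.g. an lw (fuOpType(1, 0) = "b10") with vaddr(1, 0) =/= 0 is misaligned, so
  // addrAligned is false and loadAddrMisaligned is raised on io.out below.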

  io.out.valid := (io.in.valid || io.fastpath.valid) && io.dcacheReq.ready && !io.s0_kill

  io.out.bits := DontCare
  io.out.bits.vaddr := s0_vaddr
  io.out.bits.mask := s0_mask
  io.out.bits.uop := s0_uop
  io.out.bits.uop.cf.exceptionVec(loadAddrMisaligned) := !addrAligned
  io.out.bits.rsIdx := io.rsIdx
  io.out.bits.isFirstIssue := io.isFirstIssue
  io.out.bits.isSoftPrefetch := isSoftPrefetch

  io.in.ready := !io.in.valid || (io.out.ready && io.dcacheReq.ready)

  XSDebug(io.dcacheReq.fire,
    p"[DCACHE LOAD REQ] pc ${Hexadecimal(s0_uop.cf.pc)}, vaddr ${Hexadecimal(s0_vaddr)}\n"
  )
  XSPerfAccumulate("in_valid", io.in.valid)
  XSPerfAccumulate("in_fire", io.in.fire)
  XSPerfAccumulate("in_fire_first_issue", io.in.valid && io.isFirstIssue)
  XSPerfAccumulate("stall_out", io.out.valid && !io.out.ready && io.dcacheReq.ready)
  XSPerfAccumulate("stall_dcache", io.out.valid && io.out.ready && !io.dcacheReq.ready)
  XSPerfAccumulate("addr_spec_success", io.out.fire && s0_vaddr(VAddrBits-1, 12) === io.in.bits.src(0)(VAddrBits-1, 12))
  XSPerfAccumulate("addr_spec_failed", io.out.fire && s0_vaddr(VAddrBits-1, 12) =/= io.in.bits.src(0)(VAddrBits-1, 12))
  XSPerfAccumulate("addr_spec_success_once", io.out.fire && s0_vaddr(VAddrBits-1, 12) === io.in.bits.src(0)(VAddrBits-1, 12) && io.isFirstIssue)
  XSPerfAccumulate("addr_spec_failed_once", io.out.fire && s0_vaddr(VAddrBits-1, 12) =/= io.in.bits.src(0)(VAddrBits-1, 12) && io.isFirstIssue)
}


// Load Pipeline Stage 1
// TLB resp (send paddr to dcache)
class LoadUnit_S1(implicit p: Parameters) extends XSModule {
  val io = IO(new Bundle() {
    val in = Flipped(Decoupled(new LsPipelineBundle))
    val s1_kill = Input(Bool())
    val out = Decoupled(new LsPipelineBundle)
    val dtlbResp = Flipped(DecoupledIO(new TlbResp(2)))
    val lsuPAddr = Output(UInt(PAddrBits.W))
    val dcachePAddr = Output(UInt(PAddrBits.W))
    val dcacheKill = Output(Bool())
    val dcacheBankConflict = Input(Bool())
    val fullForwardFast = Output(Bool())
    val sbuffer = new LoadForwardQueryIO
    val lsq = new PipeLoadForwardQueryIO
    val loadViolationQueryReq = Decoupled(new LoadViolationQueryReq)
    val rsFeedback = ValidIO(new RSFeedback)
    val csrCtrl = Flipped(new CustomCSRCtrlIO)
    val needLdVioCheckRedo = Output(Bool())
  })

  val s1_uop = io.in.bits.uop
  val s1_paddr_dup_lsu = io.dtlbResp.bits.paddr(0)
  val s1_paddr_dup_dcache = io.dtlbResp.bits.paddr(1)
  // af & pf exceptions are modified below.
  val s1_exception = ExceptionNO.selectByFu(io.out.bits.uop.cf.exceptionVec, lduCfg).asUInt.orR
  val s1_tlb_miss = io.dtlbResp.bits.miss
  val s1_mask = io.in.bits.mask
  val s1_bank_conflict = io.dcacheBankConflict

  io.out.bits := io.in.bits // forwardXX field will be updated in s1

  io.dtlbResp.ready := true.B

  io.lsuPAddr := s1_paddr_dup_lsu
  io.dcachePAddr := s1_paddr_dup_dcache
  //io.dcacheKill := s1_tlb_miss || s1_exception || s1_mmio
  io.dcacheKill := s1_tlb_miss || s1_exception || io.s1_kill
  // load forward query datapath
  io.sbuffer.valid := io.in.valid && !(s1_exception || s1_tlb_miss || io.s1_kill)
  io.sbuffer.vaddr := io.in.bits.vaddr
  io.sbuffer.paddr := s1_paddr_dup_lsu
  io.sbuffer.uop := s1_uop
  io.sbuffer.sqIdx := s1_uop.sqIdx
  io.sbuffer.mask := s1_mask
  io.sbuffer.pc := s1_uop.cf.pc // FIXME: remove it

  io.lsq.valid := io.in.valid && !(s1_exception || s1_tlb_miss || io.s1_kill)
  io.lsq.vaddr := io.in.bits.vaddr
  io.lsq.paddr := s1_paddr_dup_lsu
  io.lsq.uop := s1_uop
  io.lsq.sqIdx := s1_uop.sqIdx
  io.lsq.sqIdxMask := DontCare // will be overwritten by sqIdxMask pre-generated in s0
  io.lsq.mask := s1_mask
  io.lsq.pc := s1_uop.cf.pc // FIXME: remove it

  // ld-ld violation query
  io.loadViolationQueryReq.valid := io.in.valid && !(s1_exception || s1_tlb_miss || io.s1_kill)
  io.loadViolationQueryReq.bits.paddr := s1_paddr_dup_lsu
  io.loadViolationQueryReq.bits.uop := s1_uop

  // Generate forwardMaskFast to wake up insts earlier
  val forwardMaskFast = io.lsq.forwardMaskFast.asUInt | io.sbuffer.forwardMaskFast.asUInt
  io.fullForwardFast := ((~forwardMaskFast).asUInt & s1_mask) === 0.U
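  // i.e. every byte the load needs is already covered by forwarded store data.
  // For example, an lw with s1_mask = "b00001111" forwards fully iff bits 3..0 of
  // forwardMaskFast are all set, whatever the upper four bits are.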

  // Generate feedback signal caused by:
  // * dcache bank conflict
  // * need redo ld-ld violation check
  val needLdVioCheckRedo = io.loadViolationQueryReq.valid &&
    !io.loadViolationQueryReq.ready &&
    RegNext(io.csrCtrl.ldld_vio_check_enable)
  io.needLdVioCheckRedo := needLdVioCheckRedo
  io.rsFeedback.valid := io.in.valid && (s1_bank_conflict || needLdVioCheckRedo) && !io.s1_kill
  io.rsFeedback.bits.hit := false.B // we have found an s1_bank_conflict or need to redo the ld-ld violation check
  io.rsFeedback.bits.rsIdx := io.in.bits.rsIdx
  io.rsFeedback.bits.flushState := io.in.bits.ptwBack
  io.rsFeedback.bits.sourceType := Mux(s1_bank_conflict, RSFeedbackType.bankConflict, RSFeedbackType.ldVioCheckRedo)
  io.rsFeedback.bits.dataInvalidSqIdx := DontCare

  // if replay is detected in load_s1,
  // load inst will be canceled immediately
  io.out.valid := io.in.valid && !io.rsFeedback.valid && !io.s1_kill
  io.out.bits.paddr := s1_paddr_dup_lsu
  io.out.bits.tlbMiss := s1_tlb_miss

  // current ori test will cause the case of ldest == 0; below will be modified in the future.
  // af & pf exceptions are modified here
  io.out.bits.uop.cf.exceptionVec(loadPageFault) := io.dtlbResp.bits.excp(0).pf.ld
  io.out.bits.uop.cf.exceptionVec(loadAccessFault) := io.dtlbResp.bits.excp(0).af.ld

  io.out.bits.ptwBack := io.dtlbResp.bits.ptwBack
  io.out.bits.rsIdx := io.in.bits.rsIdx

  io.out.bits.isSoftPrefetch := io.in.bits.isSoftPrefetch

  io.in.ready := !io.in.valid || io.out.ready

  XSPerfAccumulate("in_valid", io.in.valid)
  XSPerfAccumulate("in_fire", io.in.fire)
  XSPerfAccumulate("in_fire_first_issue", io.in.fire && io.in.bits.isFirstIssue)
  XSPerfAccumulate("tlb_miss", io.in.fire && s1_tlb_miss)
  XSPerfAccumulate("tlb_miss_first_issue", io.in.fire && s1_tlb_miss && io.in.bits.isFirstIssue)
  XSPerfAccumulate("stall_out", io.out.valid && !io.out.ready)
}

// Load Pipeline Stage 2
// DCache resp
class LoadUnit_S2(implicit p: Parameters) extends XSModule with HasLoadHelper {
  val io = IO(new Bundle() {
    val in = Flipped(Decoupled(new LsPipelineBundle))
    val out = Decoupled(new LsPipelineBundle)
    val rsFeedback = ValidIO(new RSFeedback)
    val dcacheResp = Flipped(DecoupledIO(new BankedDCacheWordResp))
    val pmpResp = Flipped(new PMPRespBundle())
    val lsq = new LoadForwardQueryIO
    val dataInvalidSqIdx = Input(UInt())
    val sbuffer = new LoadForwardQueryIO
    val dataForwarded = Output(Bool())
    val s2_dcache_require_replay = Output(Bool())
    val fullForward = Output(Bool())
    val dcache_kill = Output(Bool())
    val s3_delayed_load_error = Output(Bool())
    val loadViolationQueryResp = Flipped(Valid(new LoadViolationQueryResp))
    val csrCtrl = Flipped(new CustomCSRCtrlIO)
    val sentFastUop = Input(Bool())
    val static_pm = Input(Valid(Bool())) // valid for static, bits for mmio
    val s2_can_replay_from_fetch = Output(Bool()) // dirty code
    val loadDataFromDcache = Output(new LoadDataFromDcacheBundle)
  })

  val pmp = WireInit(io.pmpResp)
  when (io.static_pm.valid) {
    pmp.ld := false.B
    pmp.st := false.B
    pmp.instr := false.B
    pmp.mmio := io.static_pm.bits
  }

  val s2_is_prefetch = io.in.bits.isSoftPrefetch

  // exception that may cause load addr to be invalid / illegal
  //
  // if such an exception happens, that inst and its exception info
  // will be force-written back to the rob
  val s2_exception_vec = WireInit(io.in.bits.uop.cf.exceptionVec)
  s2_exception_vec(loadAccessFault) := io.in.bits.uop.cf.exceptionVec(loadAccessFault) || pmp.ld
  // soft prefetch will not trigger any exception (but ecc error interrupt may be triggered)
  when (s2_is_prefetch) {
    s2_exception_vec := 0.U.asTypeOf(s2_exception_vec.cloneType)
  }
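  // For example, a prefetch.r that trips a PMP load check does not raise
  // loadAccessFault: the whole exception vector is cleared, since a software
  // prefetch is only a hint.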
  val s2_exception = ExceptionNO.selectByFu(s2_exception_vec, lduCfg).asUInt.orR

  // writeback access fault caused by ecc error / bus error
  //
  // * ecc data error is slow to generate, so we will not use it until load stage 3
  // * in load stage 3, an extra signal io.load_error will be used to

  // now a cache ecc error will raise an access fault
  // at the same time, error info (including the error paddr) will be written to
  // a customized CSR "CACHE_ERROR"
  if (EnableAccurateLoadError) {
    io.s3_delayed_load_error := io.dcacheResp.bits.error_delayed &&
      io.csrCtrl.cache_error_enable &&
      RegNext(io.out.valid)
  } else {
    io.s3_delayed_load_error := false.B
  }

  val actually_mmio = pmp.mmio
  val s2_uop = io.in.bits.uop
  val s2_mask = io.in.bits.mask
  val s2_paddr = io.in.bits.paddr
  val s2_tlb_miss = io.in.bits.tlbMiss
  val s2_mmio = !s2_is_prefetch && actually_mmio && !s2_exception
  val s2_cache_miss = io.dcacheResp.bits.miss
  val s2_cache_replay = io.dcacheResp.bits.replay
  val s2_cache_tag_error = io.dcacheResp.bits.tag_error
  val s2_forward_fail = io.lsq.matchInvalid || io.sbuffer.matchInvalid
  val s2_ldld_violation = io.loadViolationQueryResp.valid &&
    io.loadViolationQueryResp.bits.have_violation &&
    RegNext(io.csrCtrl.ldld_vio_check_enable)
  val s2_data_invalid = io.lsq.dataInvalid && !s2_ldld_violation && !s2_exception

  io.dcache_kill := pmp.ld || pmp.mmio // move pmp resp kill to outside
  io.dcacheResp.ready := true.B
  val dcacheShouldResp = !(s2_tlb_miss || s2_exception || s2_mmio || s2_is_prefetch)
  assert(!(io.in.valid && (dcacheShouldResp && !io.dcacheResp.valid)), "DCache response got lost")

  // merge forward result
  // lsq has higher priority than sbuffer
  val forwardMask = Wire(Vec(8, Bool()))
  val forwardData = Wire(Vec(8, UInt(8.W)))

  val fullForward = ((~forwardMask.asUInt).asUInt & s2_mask) === 0.U && !io.lsq.dataInvalid
  io.lsq := DontCare
  io.sbuffer := DontCare
  io.fullForward := fullForward

  // generate XLEN/8 Muxes
  for (i <- 0 until XLEN / 8) {
    forwardMask(i) := io.lsq.forwardMask(i) || io.sbuffer.forwardMask(i)
    forwardData(i) := Mux(io.lsq.forwardMask(i), io.lsq.forwardData(i), io.sbuffer.forwardData(i))
  }
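  // Per-byte merge example: if the lsq forwards byte 0 and the sbuffer forwards bytes
  // 0..3, byte 0 comes from the lsq (store queue data is younger than committed
  // sbuffer data, hence its priority) and bytes 1..3 come from the sbuffer.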

  XSDebug(io.out.fire, "[FWD LOAD RESP] pc %x fwd %x(%b) + %x(%b)\n",
    s2_uop.cf.pc,
    io.lsq.forwardData.asUInt, io.lsq.forwardMask.asUInt,
    io.in.bits.forwardData.asUInt, io.in.bits.forwardMask.asUInt
  )

  // data merge
  // val rdataVec = VecInit((0 until XLEN / 8).map(j =>
  //   Mux(forwardMask(j), forwardData(j), io.dcacheResp.bits.data(8*(j+1)-1, 8*j))
  // )) // s2_rdataVec will be written to load queue
  // val rdata = rdataVec.asUInt
  // val rdataSel = LookupTree(s2_paddr(2, 0), List(
  //   "b000".U -> rdata(63, 0),
  //   "b001".U -> rdata(63, 8),
  //   "b010".U -> rdata(63, 16),
  //   "b011".U -> rdata(63, 24),
  //   "b100".U -> rdata(63, 32),
  //   "b101".U -> rdata(63, 40),
  //   "b110".U -> rdata(63, 48),
  //   "b111".U -> rdata(63, 56)
  // ))
  // val rdataPartialLoad = rdataHelper(s2_uop, rdataSel) // s2_rdataPartialLoad is not used

  io.out.valid := io.in.valid && !s2_tlb_miss && !s2_data_invalid
  // Inst will be canceled in store queue / lsq,
  // so we do not need to care about flush in load / store unit's out.valid
  io.out.bits := io.in.bits
  // io.out.bits.data := rdataPartialLoad
  io.out.bits.data := 0.U // data will be generated in load_s3
  // when exception occurs, set it to not miss and let it write back to rob (via int port)
  if (EnableFastForward) {
    io.out.bits.miss := s2_cache_miss &&
      !s2_exception &&
      !fullForward &&
      !s2_is_prefetch
  } else {
    io.out.bits.miss := s2_cache_miss &&
      !s2_exception &&
      !s2_is_prefetch
  }
  io.out.bits.uop.ctrl.fpWen := io.in.bits.uop.ctrl.fpWen && !s2_exception

  io.loadDataFromDcache.bankedDcacheData := io.dcacheResp.bits.bank_data
  io.loadDataFromDcache.bank_oh := io.dcacheResp.bits.bank_oh
  // io.loadDataFromDcache.dcacheData := io.dcacheResp.bits.data
  io.loadDataFromDcache.forwardMask := forwardMask
  io.loadDataFromDcache.forwardData := forwardData
  io.loadDataFromDcache.uop := io.out.bits.uop
  io.loadDataFromDcache.addrOffset := s2_paddr(2, 0)

  io.s2_can_replay_from_fetch := !s2_mmio && !s2_is_prefetch && !s2_tlb_miss
  // if forward fails, replay this inst from fetch
  val debug_forwardFailReplay = s2_forward_fail && !s2_mmio && !s2_is_prefetch && !s2_tlb_miss
  // if ld-ld violation is detected, replay this inst from fetch
  val debug_ldldVioReplay = s2_ldld_violation && !s2_mmio && !s2_is_prefetch && !s2_tlb_miss
  // io.out.bits.uop.ctrl.replayInst := false.B

  io.out.bits.mmio := s2_mmio
  io.out.bits.uop.ctrl.flushPipe := s2_mmio && io.sentFastUop
  io.out.bits.uop.cf.exceptionVec := s2_exception_vec // cache error not included

  // For timing reasons, sometimes we can not let
  // io.out.bits.miss := s2_cache_miss && !s2_exception && !fullForward
  // We use io.dataForwarded instead. It means:
  // 1. Forward logic has prepared all data needed,
  //    and the dcache query is no longer needed.
  // 2. ... or a data cache tag error is detected, in which case the inst
  //    will not update the miss queue. That is to say, if it misses, the inst
  //    may not be refilled.
  // Such an inst will be written back from the load queue.
  io.dataForwarded := s2_cache_miss && !s2_exception &&
    (fullForward || io.csrCtrl.cache_error_enable && s2_cache_tag_error)
  // io.out.bits.forwardX will be sent to lq
  io.out.bits.forwardMask := forwardMask
  // data from dcache is not included in io.out.bits.forwardData
  io.out.bits.forwardData := forwardData

  io.in.ready := io.out.ready || !io.in.valid

  // feedback tlb result to RS
  io.rsFeedback.valid := io.in.valid
  val s2_need_replay_from_rs = Wire(Bool())
  if (EnableFastForward) {
    s2_need_replay_from_rs :=
      s2_tlb_miss || // replay if dtlb miss
      s2_cache_replay && !s2_is_prefetch && !s2_mmio && !s2_exception && !fullForward || // replay if dcache miss queue full / busy
      s2_data_invalid && !s2_is_prefetch // replay if store to load forward data is not ready
  } else {
    // Note that if all parts of data are available in sq / sbuffer, replay required by dcache will not be scheduled
    s2_need_replay_from_rs :=
      s2_tlb_miss || // replay if dtlb miss
      s2_cache_replay && !s2_is_prefetch && !s2_mmio && !s2_exception && !io.dataForwarded || // replay if dcache miss queue full / busy
      s2_data_invalid && !s2_is_prefetch // replay if store to load forward data is not ready
  }
  io.rsFeedback.bits.hit := !s2_need_replay_from_rs
  io.rsFeedback.bits.rsIdx := io.in.bits.rsIdx
  io.rsFeedback.bits.flushState := io.in.bits.ptwBack
  // feedback source priority: tlbMiss > dataInvalid > mshrFull
  // general case priority: tlbMiss > exception (include forward_fail / ldld_violation) > mmio > dataInvalid > mshrFull > normal miss / hit
  io.rsFeedback.bits.sourceType := Mux(s2_tlb_miss, RSFeedbackType.tlbMiss,
    Mux(s2_data_invalid,
      RSFeedbackType.dataInvalid,
      RSFeedbackType.mshrFull
    )
  )
  io.rsFeedback.bits.dataInvalidSqIdx.value := io.dataInvalidSqIdx
  io.rsFeedback.bits.dataInvalidSqIdx.flag := DontCare

  // s2_cache_replay is quite slow to generate, send it separately to LQ
  if (EnableFastForward) {
    io.s2_dcache_require_replay := s2_cache_replay && !fullForward
  } else {
    io.s2_dcache_require_replay := s2_cache_replay &&
      !io.rsFeedback.bits.hit &&
      !io.dataForwarded &&
      !s2_is_prefetch &&
      io.out.bits.miss
  }

  XSPerfAccumulate("in_valid", io.in.valid)
  XSPerfAccumulate("in_fire", io.in.fire)
  XSPerfAccumulate("in_fire_first_issue", io.in.fire && io.in.bits.isFirstIssue)
  XSPerfAccumulate("dcache_miss", io.in.fire && s2_cache_miss)
  XSPerfAccumulate("dcache_miss_first_issue", io.in.fire && s2_cache_miss && io.in.bits.isFirstIssue)
  XSPerfAccumulate("full_forward", io.in.valid && fullForward)
  XSPerfAccumulate("dcache_miss_full_forward", io.in.valid && s2_cache_miss && fullForward)
  XSPerfAccumulate("replay",  io.rsFeedback.valid && !io.rsFeedback.bits.hit)
  XSPerfAccumulate("replay_tlb_miss", io.rsFeedback.valid && !io.rsFeedback.bits.hit && s2_tlb_miss)
  XSPerfAccumulate("replay_cache", io.rsFeedback.valid && !io.rsFeedback.bits.hit && !s2_tlb_miss && s2_cache_replay)
  XSPerfAccumulate("stall_out", io.out.valid && !io.out.ready)
  XSPerfAccumulate("replay_from_fetch_forward", io.out.valid && debug_forwardFailReplay)
  XSPerfAccumulate("replay_from_fetch_load_vio", io.out.valid && debug_ldldVioReplay)
}

class LoadUnit(implicit p: Parameters) extends XSModule
  with HasLoadHelper
  with HasPerfEvents
  with HasDCacheParameters
{
  val io = IO(new Bundle() {
    val ldin = Flipped(Decoupled(new ExuInput))
    val ldout = Decoupled(new ExuOutput)
    val redirect = Flipped(ValidIO(new Redirect))
    val feedbackSlow = ValidIO(new RSFeedback)
    val feedbackFast = ValidIO(new RSFeedback)
    val rsIdx = Input(UInt(log2Up(IssQueSize).W))
    val isFirstIssue = Input(Bool())
    val dcache = new DCacheLoadIO
    val sbuffer = new LoadForwardQueryIO
    val lsq = new LoadToLsqIO
    val refill = Flipped(ValidIO(new Refill))
    val fastUop = ValidIO(new MicroOp) // early wakeup signal generated in load_s1, sent to RS in load_s2
    val trigger = Vec(3, new LoadUnitTriggerIO)

    val tlb = new TlbRequestIO(2)
    val pmp = Flipped(new PMPRespBundle()) // arrives in the same cycle as the tlb resp now

    val fastpathOut = Output(new LoadToLoadIO)
    val fastpathIn = Input(new LoadToLoadIO)
    val loadFastMatch = Input(Bool())
    val loadFastImm = Input(UInt(12.W))

    val s3_delayed_load_error = Output(Bool()) // load ecc error
    // Note that io.s3_delayed_load_error and io.lsq.s3_delayed_load_error are different

    val csrCtrl = Flipped(new CustomCSRCtrlIO)
  })

  val load_s0 = Module(new LoadUnit_S0)
  val load_s1 = Module(new LoadUnit_S1)
  val load_s2 = Module(new LoadUnit_S2)

  // load s0
  load_s0.io.in <> io.ldin
  load_s0.io.dtlbReq <> io.tlb.req
  load_s0.io.dcacheReq <> io.dcache.req
  load_s0.io.rsIdx := io.rsIdx
  load_s0.io.isFirstIssue := io.isFirstIssue
  load_s0.io.s0_kill := false.B
  val s0_tryPointerChasing = !io.ldin.valid && io.fastpathIn.valid
  val s0_pointerChasingVAddr = io.fastpathIn.data(5, 0) +& io.loadFastImm(5, 0)
  load_s0.io.fastpath.valid := io.fastpathIn.valid
  load_s0.io.fastpath.data := Cat(io.fastpathIn.data(XLEN-1, 6), s0_pointerChasingVAddr(5,0))
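  // Only the low 6 bits of the immediate are added here, using a width-expanding add
  // (+&) so the carry out survives in s0_pointerChasingVAddr(6). The upper address
  // bits are passed through unchanged; if the add actually carried out of bit 5, or
  // imm(11, 6) is non-zero, the guessed address is wrong and the forwarding is
  // cancelled in s1 via addressMisMatch below.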

  val s1_data = PipelineConnect(load_s0.io.out, load_s1.io.in, true.B,
    load_s0.io.out.bits.uop.robIdx.needFlush(io.redirect) && !s0_tryPointerChasing).get

  // load s1
  load_s1.io.s1_kill := RegEnable(load_s0.io.s0_kill, false.B, load_s0.io.in.valid || io.fastpathIn.valid)
  io.tlb.req_kill := load_s1.io.s1_kill
  load_s1.io.dtlbResp <> io.tlb.resp
  io.dcache.s1_paddr_dup_lsu <> load_s1.io.lsuPAddr
  io.dcache.s1_paddr_dup_dcache <> load_s1.io.dcachePAddr
  io.dcache.s1_kill := load_s1.io.dcacheKill
  load_s1.io.sbuffer <> io.sbuffer
  load_s1.io.lsq <> io.lsq.forward
  load_s1.io.loadViolationQueryReq <> io.lsq.loadViolationQuery.req
  load_s1.io.dcacheBankConflict <> io.dcache.s1_bank_conflict
  load_s1.io.csrCtrl <> io.csrCtrl

  val s0_doTryPointerChasing = s0_tryPointerChasing && load_s0.io.in.ready && load_s0.io.dcacheReq.ready
  val s1_tryPointerChasing = RegNext(s0_doTryPointerChasing, false.B)
  val s1_pointerChasingVAddr = RegEnable(s0_pointerChasingVAddr, s0_doTryPointerChasing)
  val cancelPointerChasing = WireInit(false.B)
  if (EnableLoadToLoadForward) {
    // Sometimes, we need to cancel the load-load forwarding.
    // These can be put at S0 if timing is bad at S1.
    // Case 0: CACHE_SET(base + offset) != CACHE_SET(base) (lowest 6-bit addition has an overflow)
    val addressMisMatch = s1_pointerChasingVAddr(6) || RegEnable(io.loadFastImm(11, 6).orR, s0_doTryPointerChasing)
    // Case 1: the address is not 64-bit aligned or the fuOpType is not LD
    val addressNotAligned = s1_pointerChasingVAddr(2, 0).orR
    val fuOpTypeIsNotLd = io.ldin.bits.uop.ctrl.fuOpType =/= LSUOpType.ld
    // Case 2: this is not a valid load-load pair
    val notFastMatch = RegEnable(!io.loadFastMatch, s0_tryPointerChasing)
    // Case 3: this load-load uop is cancelled
    val isCancelled = !io.ldin.valid
    when (s1_tryPointerChasing) {
      cancelPointerChasing := addressMisMatch || addressNotAligned || fuOpTypeIsNotLd || notFastMatch || isCancelled
      load_s1.io.in.bits.uop := io.ldin.bits.uop
      val spec_vaddr = s1_data.vaddr
      val vaddr = Cat(spec_vaddr(VAddrBits - 1, 6), s1_pointerChasingVAddr(5, 3), 0.U(3.W))
      load_s1.io.in.bits.vaddr := vaddr
      load_s1.io.in.bits.rsIdx := io.rsIdx
      load_s1.io.in.bits.isFirstIssue := io.isFirstIssue
      // We need to replace vaddr(5, 3).
      val spec_paddr = io.tlb.resp.bits.paddr(0)
      load_s1.io.dtlbResp.bits.paddr.foreach(_ := Cat(spec_paddr(PAddrBits - 1, 6), s1_pointerChasingVAddr(5, 3), 0.U(3.W)))
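      // Patching paddr(5, 3) with the recomputed bits is safe: bits (5, 0) lie in the
      // page offset, which address translation never changes, and a carry into bit 6
      // is already caught by addressMisMatch above.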
    }
    when (cancelPointerChasing) {
      load_s1.io.s1_kill := true.B
    }.otherwise {
      load_s0.io.s0_kill := s1_tryPointerChasing
      when (s1_tryPointerChasing) {
        io.ldin.ready := true.B
      }
    }

    XSPerfAccumulate("load_to_load_forward", s1_tryPointerChasing && !cancelPointerChasing)
    XSPerfAccumulate("load_to_load_forward_try", s1_tryPointerChasing)
    XSPerfAccumulate("load_to_load_forward_fail", cancelPointerChasing)
    XSPerfAccumulate("load_to_load_forward_fail_cancelled", cancelPointerChasing && isCancelled)
    XSPerfAccumulate("load_to_load_forward_fail_wakeup_mismatch", cancelPointerChasing && !isCancelled && notFastMatch)
    XSPerfAccumulate("load_to_load_forward_fail_op_not_ld",
      cancelPointerChasing && !isCancelled && !notFastMatch && fuOpTypeIsNotLd)
    XSPerfAccumulate("load_to_load_forward_fail_addr_align",
      cancelPointerChasing && !isCancelled && !notFastMatch && !fuOpTypeIsNotLd && addressNotAligned)
    XSPerfAccumulate("load_to_load_forward_fail_set_mismatch",
      cancelPointerChasing && !isCancelled && !notFastMatch && !fuOpTypeIsNotLd && !addressNotAligned && addressMisMatch)
  }
  PipelineConnect(load_s1.io.out, load_s2.io.in, true.B,
    load_s1.io.out.bits.uop.robIdx.needFlush(io.redirect) || cancelPointerChasing)

  // provide paddr for lq
  io.lsq.loadPaddrIn.valid := load_s1.io.out.valid
  io.lsq.loadPaddrIn.bits.lqIdx := load_s1.io.out.bits.uop.lqIdx
  io.lsq.loadPaddrIn.bits.paddr := load_s1.io.lsuPAddr

  // load s2
  io.dcache.s2_kill := load_s2.io.dcache_kill // to kill mmio resps that are redirected
  load_s2.io.dcacheResp <> io.dcache.resp
  load_s2.io.pmpResp <> io.pmp
  load_s2.io.static_pm := RegNext(io.tlb.resp.bits.static_pm)
  load_s2.io.lsq.forwardData <> io.lsq.forward.forwardData
  load_s2.io.lsq.forwardMask <> io.lsq.forward.forwardMask
  load_s2.io.lsq.forwardMaskFast <> io.lsq.forward.forwardMaskFast // should not be used in load_s2
  load_s2.io.lsq.dataInvalid <> io.lsq.forward.dataInvalid
  load_s2.io.lsq.matchInvalid <> io.lsq.forward.matchInvalid
  load_s2.io.sbuffer.forwardData <> io.sbuffer.forwardData
  load_s2.io.sbuffer.forwardMask <> io.sbuffer.forwardMask
  load_s2.io.sbuffer.forwardMaskFast <> io.sbuffer.forwardMaskFast // should not be used in load_s2
  load_s2.io.sbuffer.dataInvalid <> io.sbuffer.dataInvalid // always false
  load_s2.io.sbuffer.matchInvalid <> io.sbuffer.matchInvalid
  load_s2.io.dataForwarded <> io.lsq.s2_load_data_forwarded
  load_s2.io.dataInvalidSqIdx := io.lsq.forward.dataInvalidSqIdx // provide dataInvalidSqIdx to make wakeup faster
  load_s2.io.loadViolationQueryResp <> io.lsq.loadViolationQuery.resp
  load_s2.io.csrCtrl <> io.csrCtrl
  load_s2.io.sentFastUop := io.fastUop.valid

  // feedback bank conflict / ld-vio check struct hazard to rs
  io.feedbackFast.bits := RegNext(load_s1.io.rsFeedback.bits)
  io.feedbackFast.valid := RegNext(load_s1.io.rsFeedback.valid && !load_s1.io.out.bits.uop.robIdx.needFlush(io.redirect))

  // pre-calculate sqIdx mask in s0, then send it to lsq in s1 for forwarding
  val sqIdxMaskReg = RegNext(UIntToMask(load_s0.io.in.bits.uop.sqIdx.value, StoreQueueSize))
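  // UIntToMask(sqIdx, n) sets the mask bits below sqIdx, marking store queue entries
  // allocated before this load, which is the candidate set for store-to-load
  // forwarding. Computing the mask in s0 and registering it keeps it off the s1
  // forwarding critical path.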
  // to enable load-load, sqIdxMask must be calculated based on ldin.uop
  // If the timing here is not OK, load-load forwarding has to be disabled.
  // Or we calculate sqIdxMask at RS??
  io.lsq.forward.sqIdxMask := sqIdxMaskReg
  if (EnableLoadToLoadForward) {
    when (s1_tryPointerChasing) {
      io.lsq.forward.sqIdxMask := UIntToMask(io.ldin.bits.uop.sqIdx.value, StoreQueueSize)
    }
  }

  // // use s2_hit_way to select data received in s1
  // load_s2.io.dcacheResp.bits.data := Mux1H(RegNext(io.dcache.s1_hit_way), RegNext(io.dcache.s1_data))
  // assert(load_s2.io.dcacheResp.bits.data === io.dcache.resp.bits.data)

  // now io.fastUop.valid is sent to RS in load_s2
  val s2_dcache_hit = io.dcache.s2_hit // dcache hit dup in lsu side

  io.fastUop.valid := RegNext(
      !io.dcache.s1_disable_fast_wakeup &&  // load fast wakeup should be disabled when dcache data read is not ready
      load_s1.io.in.valid && // valid load request
      !load_s1.io.s1_kill && // killed by load-load forwarding
      !load_s1.io.dtlbResp.bits.fast_miss && // not mmio or tlb miss, pf / af not included here
      !io.lsq.forward.dataInvalidFast // forward failed
    ) &&
    !RegNext(load_s1.io.needLdVioCheckRedo) && // load-load violation check: load paddr cam struct hazard
    !RegNext(load_s1.io.out.bits.uop.robIdx.needFlush(io.redirect)) &&
    s2_dcache_hit // dcache hit in lsu side

  io.fastUop.bits := RegNext(load_s1.io.out.bits.uop)

  XSDebug(load_s0.io.out.valid,
    p"S0: pc ${Hexadecimal(load_s0.io.out.bits.uop.cf.pc)}, lId ${Hexadecimal(load_s0.io.out.bits.uop.lqIdx.asUInt)}, " +
    p"vaddr ${Hexadecimal(load_s0.io.out.bits.vaddr)}, mask ${Hexadecimal(load_s0.io.out.bits.mask)}\n")
  XSDebug(load_s1.io.out.valid,
    p"S1: pc ${Hexadecimal(load_s1.io.out.bits.uop.cf.pc)}, lId ${Hexadecimal(load_s1.io.out.bits.uop.lqIdx.asUInt)}, tlb_miss ${io.tlb.resp.bits.miss}, " +
    p"paddr ${Hexadecimal(load_s1.io.out.bits.paddr)}, mmio ${load_s1.io.out.bits.mmio}\n")

  // writeback to LSQ
  // The current dcache uses MSHRs
  // Load queue will be updated at s2 for both hit/miss int/fp load
  io.lsq.loadIn.valid := load_s2.io.out.valid
  // generate LqWriteBundle from LsPipelineBundle
  io.lsq.loadIn.bits.fromLsPipelineBundle(load_s2.io.out.bits)
  // generate duplicated load queue data wen
  val load_s2_valid_vec = RegInit(0.U(6.W))
  val load_s2_leftFire = load_s1.io.out.valid && load_s2.io.in.ready
  load_s2_valid_vec := 0x0.U(6.W)
  when (load_s2_leftFire) { load_s2_valid_vec := 0x3f.U(6.W)}
  when (load_s1.io.out.bits.uop.robIdx.needFlush(io.redirect)) { load_s2_valid_vec := 0x0.U(6.W) }
  assert(RegNext(load_s2.io.in.valid === load_s2_valid_vec(0)))
  io.lsq.loadIn.bits.lq_data_wen_dup := load_s2_valid_vec.asBools()
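  // A note on the duplication: the s2 valid bit is copied six ways, presumably so each
  // copy can drive a separate load queue data write enable with low fanout; the assert
  // above checks that bit 0 still tracks the real load_s2 valid.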

  // s2_dcache_require_replay signal will be RegNexted, then used in s3
  io.lsq.s2_dcache_require_replay := load_s2.io.s2_dcache_require_replay

  // write to rob and writeback bus
  val s2_wb_valid = load_s2.io.out.valid && !load_s2.io.out.bits.miss && !load_s2.io.out.bits.mmio

  // An int load, if it hits, will be written back at s2
  val hitLoadOut = Wire(Valid(new ExuOutput))
  hitLoadOut.valid := s2_wb_valid
  hitLoadOut.bits.uop := load_s2.io.out.bits.uop
  hitLoadOut.bits.data := load_s2.io.out.bits.data
  hitLoadOut.bits.redirectValid := false.B
  hitLoadOut.bits.redirect := DontCare
  hitLoadOut.bits.debug.isMMIO := load_s2.io.out.bits.mmio
  hitLoadOut.bits.debug.isPerfCnt := false.B
  hitLoadOut.bits.debug.paddr := load_s2.io.out.bits.paddr
  hitLoadOut.bits.debug.vaddr := load_s2.io.out.bits.vaddr
  hitLoadOut.bits.fflags := DontCare

  load_s2.io.out.ready := true.B

  // load s3
  val s3_load_wb_meta_reg = RegNext(Mux(hitLoadOut.valid, hitLoadOut.bits, io.lsq.ldout.bits))

  // data from load queue refill
  val s3_loadDataFromLQ = RegEnable(io.lsq.ldRawData, io.lsq.ldout.valid)
  val s3_rdataLQ = s3_loadDataFromLQ.mergedData()
  val s3_rdataSelLQ = LookupTree(s3_loadDataFromLQ.addrOffset, List(
    "b000".U -> s3_rdataLQ(63, 0),
    "b001".U -> s3_rdataLQ(63, 8),
    "b010".U -> s3_rdataLQ(63, 16),
    "b011".U -> s3_rdataLQ(63, 24),
    "b100".U -> s3_rdataLQ(63, 32),
    "b101".U -> s3_rdataLQ(63, 40),
    "b110".U -> s3_rdataLQ(63, 48),
    "b111".U -> s3_rdataLQ(63, 56)
  ))
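  // The LookupTree acts as a byte shifter: it discards addrOffset low bytes of the
  // merged 64-bit data, and rdataHelper below sign- or zero-extends the result per
  // the uop (e.g. lb vs lbu). For an lh at offset 2 the selection is
  // s3_rdataLQ(63, 16), of which only the low 16 bits are used.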
  val s3_rdataPartialLoadLQ = rdataHelper(s3_loadDataFromLQ.uop, s3_rdataSelLQ)

  // data from dcache hit
  val s3_loadDataFromDcache = RegEnable(load_s2.io.loadDataFromDcache, load_s2.io.in.valid)
  val s3_rdataDcache = s3_loadDataFromDcache.mergedData()
  val s3_rdataSelDcache = LookupTree(s3_loadDataFromDcache.addrOffset, List(
    "b000".U -> s3_rdataDcache(63, 0),
    "b001".U -> s3_rdataDcache(63, 8),
    "b010".U -> s3_rdataDcache(63, 16),
    "b011".U -> s3_rdataDcache(63, 24),
    "b100".U -> s3_rdataDcache(63, 32),
    "b101".U -> s3_rdataDcache(63, 40),
    "b110".U -> s3_rdataDcache(63, 48),
    "b111".U -> s3_rdataDcache(63, 56)
  ))
  val s3_rdataPartialLoadDcache = rdataHelper(s3_loadDataFromDcache.uop, s3_rdataSelDcache)

  io.ldout.bits := s3_load_wb_meta_reg
  io.ldout.bits.data := Mux(RegNext(hitLoadOut.valid), s3_rdataPartialLoadDcache, s3_rdataPartialLoadLQ)
  io.ldout.valid := RegNext(hitLoadOut.valid) && !RegNext(load_s2.io.out.bits.uop.robIdx.needFlush(io.redirect)) ||
    RegNext(io.lsq.ldout.valid) && !RegNext(io.lsq.ldout.bits.uop.robIdx.needFlush(io.redirect)) && !RegNext(hitLoadOut.valid)

  io.ldout.bits.uop.cf.exceptionVec(loadAccessFault) := s3_load_wb_meta_reg.uop.cf.exceptionVec(loadAccessFault) ||
    RegNext(hitLoadOut.valid) && load_s2.io.s3_delayed_load_error

  // fast load to load forward
  io.fastpathOut.valid := RegNext(load_s2.io.out.valid) // for debug only
  io.fastpathOut.data := s3_loadDataFromDcache.mergedData() // fastpath is for ld only
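  // The full merged 64-bit value (dcache data patched with forwarded store bytes) is
  // exported unshifted and unextended; the consumer's s0 uses it directly as the next
  // vaddr, which is why the fast path only supports 64-bit ld.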

  // feedback tlb miss / dcache miss queue full
  io.feedbackSlow.bits := RegNext(load_s2.io.rsFeedback.bits)
  io.feedbackSlow.valid := RegNext(load_s2.io.rsFeedback.valid && !load_s2.io.out.bits.uop.robIdx.needFlush(io.redirect))
  // If replay is reported at load_s1, inst will be canceled (will not enter load_s2),
  // in that case:
  // * replay should not be reported twice
  assert(!(RegNext(io.feedbackFast.valid) && io.feedbackSlow.valid))
  // * io.fastUop.valid should not be reported
  assert(!RegNext(io.feedbackFast.valid && io.fastUop.valid))

  // load forward_fail/ldld_violation check
  // check for inst in load pipeline
  val s3_forward_fail = RegNext(io.lsq.forward.matchInvalid || io.sbuffer.matchInvalid)
  val s3_ldld_violation = RegNext(
    io.lsq.loadViolationQuery.resp.valid &&
    io.lsq.loadViolationQuery.resp.bits.have_violation &&
    RegNext(io.csrCtrl.ldld_vio_check_enable)
  )
  val s3_need_replay_from_fetch = s3_forward_fail || s3_ldld_violation
  val s3_can_replay_from_fetch = RegEnable(load_s2.io.s2_can_replay_from_fetch, load_s2.io.out.valid)
  // 1) use load pipe check result generated in load_s3 iff load_hit
  when (RegNext(hitLoadOut.valid)) {
    io.ldout.bits.uop.ctrl.replayInst := s3_need_replay_from_fetch
  }
  // 2) otherwise, write check result to load queue
  io.lsq.s3_replay_from_fetch := s3_need_replay_from_fetch && s3_can_replay_from_fetch

  // s3_delayed_load_error path is not used for now, as we writeback load result in load_s3
  // but we keep this path for future use
  io.s3_delayed_load_error := false.B
  io.lsq.s3_delayed_load_error := false.B //load_s2.io.s3_delayed_load_error

  io.lsq.ldout.ready := !hitLoadOut.valid

  when(io.feedbackSlow.valid && !io.feedbackSlow.bits.hit){
    // when a replay from rs is needed, the inst should not be written back to the rob
    assert(RegNext(!hitLoadOut.valid))
    assert(RegNext(!io.lsq.loadIn.valid) || RegNext(load_s2.io.s2_dcache_require_replay))
  }

  val lastValidData = RegEnable(io.ldout.bits.data, io.ldout.fire)
  val hitLoadAddrTriggerHitVec = Wire(Vec(3, Bool()))
  val lqLoadAddrTriggerHitVec = io.lsq.trigger.lqLoadAddrTriggerHitVec
  (0 until 3).map{i => {
    val tdata2 = io.trigger(i).tdata2
    val matchType = io.trigger(i).matchType
    val tEnable = io.trigger(i).tEnable

    hitLoadAddrTriggerHitVec(i) := TriggerCmp(load_s2.io.out.bits.vaddr, tdata2, matchType, tEnable)
    io.trigger(i).addrHit := Mux(hitLoadOut.valid, hitLoadAddrTriggerHitVec(i), lqLoadAddrTriggerHitVec(i))
    io.trigger(i).lastDataHit := TriggerCmp(lastValidData, tdata2, matchType, tEnable)
  }}
  io.lsq.trigger.hitLoadAddrTriggerHitVec := hitLoadAddrTriggerHitVec

  val perfEvents = Seq(
    ("load_s0_in_fire         ", load_s0.io.in.fire                                                                                                              ),
    ("load_to_load_forward    ", load_s1.io.out.valid && s1_tryPointerChasing && !cancelPointerChasing                                                           ),
    ("stall_dcache            ", load_s0.io.out.valid && load_s0.io.out.ready && !load_s0.io.dcacheReq.ready                                                     ),
    ("load_s1_in_fire         ", load_s1.io.in.fire                                                                                                              ),
    ("load_s1_tlb_miss        ", load_s1.io.in.fire && load_s1.io.dtlbResp.bits.miss                                                                             ),
    ("load_s2_in_fire         ", load_s2.io.in.fire                                                                                                              ),
    ("load_s2_dcache_miss     ", load_s2.io.in.fire && load_s2.io.dcacheResp.bits.miss                                                                           ),
    ("load_s2_replay          ", load_s2.io.rsFeedback.valid && !load_s2.io.rsFeedback.bits.hit                                                                  ),
    ("load_s2_replay_tlb_miss ", load_s2.io.rsFeedback.valid && !load_s2.io.rsFeedback.bits.hit && load_s2.io.in.bits.tlbMiss                                    ),
    ("load_s2_replay_cache    ", load_s2.io.rsFeedback.valid && !load_s2.io.rsFeedback.bits.hit && !load_s2.io.in.bits.tlbMiss && load_s2.io.dcacheResp.bits.miss),
  )
  generatePerfEvent()

  when(io.ldout.fire){
    XSDebug("ldout %x\n", io.ldout.bits.uop.cf.pc)
  }
}