// xref: /XiangShan/src/main/scala/xiangshan/mem/pipeline/LoadUnit.scala (revision 9e4583a22ed4d6cc995424fb8693c95be0eaef84)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import xiangshan.ExceptionNO._
import xiangshan._
import xiangshan.backend.fu.PMPRespBundle
import xiangshan.cache._
import xiangshan.cache.mmu.{TlbCmd, TlbReq, TlbRequestIO, TlbResp}

class LoadToLsqIO(implicit p: Parameters) extends XSBundle {
  val loadIn = ValidIO(new LsPipelineBundle)
  val ldout = Flipped(DecoupledIO(new ExuOutput))
  val loadDataForwarded = Output(Bool())
  val delayedLoadError = Output(Bool())
  val dcacheRequireReplay = Output(Bool())
  val forward = new PipeLoadForwardQueryIO
  val loadViolationQuery = new LoadViolationQueryIO
  val trigger = Flipped(new LqTriggerIO)
}

class LoadToLoadIO(implicit p: Parameters) extends XSBundle {
  // load to load fast path is limited to ld (64 bit) used as vaddr src1 only
  val data = UInt(XLEN.W)
  val valid = Bool()
}

class LoadUnitTriggerIO(implicit p: Parameters) extends XSBundle {
  val tdata2 = Input(UInt(64.W))
  val matchType = Input(UInt(2.W))
  val tEnable = Input(Bool()) // timing is calculated before this
  val addrHit = Output(Bool())
  val lastDataHit = Output(Bool())
}

// Load Pipeline Stage 0
// Generate addr, use addr to query DCache and DTLB
class LoadUnit_S0(implicit p: Parameters) extends XSModule with HasDCacheParameters {
  val io = IO(new Bundle() {
    val in = Flipped(Decoupled(new ExuInput))
    val out = Decoupled(new LsPipelineBundle)
    val fastpath = Input(Vec(LoadPipelineWidth, new LoadToLoadIO))
    val dtlbReq = DecoupledIO(new TlbReq)
    val dcacheReq = DecoupledIO(new DCacheWordReq)
    val rsIdx = Input(UInt(log2Up(IssQueSize).W))
    val isFirstIssue = Input(Bool())
    val loadFastMatch = Input(UInt(exuParameters.LduCnt.W))
  })
  require(LoadPipelineWidth == exuParameters.LduCnt)

  val s0_uop = io.in.bits.uop
  val imm12 = WireInit(s0_uop.ctrl.imm(11, 0))

  val s0_vaddr = WireInit(io.in.bits.src(0) + SignExt(imm12, VAddrBits))
  val s0_mask = WireInit(genWmask(s0_vaddr, s0_uop.ctrl.fuOpType(1, 0)))

  if (EnableLoadToLoadForward) {
    // slow vaddr from non-load insts
    val slowpath_vaddr = io.in.bits.src(0) + SignExt(imm12, VAddrBits)
    val slowpath_mask = genWmask(slowpath_vaddr, s0_uop.ctrl.fuOpType(1, 0))

    // fast vaddr from load insts
    val fastpath_vaddrs = WireInit(VecInit(List.tabulate(LoadPipelineWidth)(i => {
      io.fastpath(i).data + SignExt(imm12, VAddrBits)
    })))
    val fastpath_masks = WireInit(VecInit(List.tabulate(LoadPipelineWidth)(i => {
      genWmask(fastpath_vaddrs(i), s0_uop.ctrl.fuOpType(1, 0))
    })))
    val fastpath_vaddr = Mux1H(io.loadFastMatch, fastpath_vaddrs)
    val fastpath_mask  = Mux1H(io.loadFastMatch, fastpath_masks)

    // select the vaddr: use the fast path (forwarded load result) when a fast
    // match fired, otherwise the slow path (register-file operand)
    s0_vaddr := Mux(io.loadFastMatch.orR, fastpath_vaddr, slowpath_vaddr)
    s0_mask  := Mux(io.loadFastMatch.orR, fastpath_mask, slowpath_mask)
    XSPerfAccumulate("load_to_load_forward", io.loadFastMatch.orR && io.in.fire())
  }
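  // Note: io.loadFastMatch is a one-hot (or all-zero) hint, which makes the
  // Mux1H selection above safe. For example, with LoadPipelineWidth = 2:
  //   loadFastMatch = b01 -> use fastpath_vaddrs(0)
  //   loadFastMatch = b10 -> use fastpath_vaddrs(1)
  //   loadFastMatch = b00 -> fall back to slowpath_vaddr (orR is false)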

  val isSoftPrefetch = LSUOpType.isPrefetch(s0_uop.ctrl.fuOpType)
  val isSoftPrefetchRead = s0_uop.ctrl.fuOpType === LSUOpType.prefetch_r
  val isSoftPrefetchWrite = s0_uop.ctrl.fuOpType === LSUOpType.prefetch_w

  // query DTLB
  io.dtlbReq.valid := io.in.valid
  io.dtlbReq.bits.vaddr := s0_vaddr
  io.dtlbReq.bits.cmd := TlbCmd.read
  io.dtlbReq.bits.size := LSUOpType.size(io.in.bits.uop.ctrl.fuOpType)
  io.dtlbReq.bits.robIdx := s0_uop.robIdx
  io.dtlbReq.bits.debug.pc := s0_uop.cf.pc
  io.dtlbReq.bits.debug.isFirstIssue := io.isFirstIssue

  // query DCache
  io.dcacheReq.valid := io.in.valid
  when (isSoftPrefetchRead) {
    io.dcacheReq.bits.cmd  := MemoryOpConstants.M_PFR
  }.elsewhen (isSoftPrefetchWrite) {
    io.dcacheReq.bits.cmd  := MemoryOpConstants.M_PFW
  }.otherwise {
    io.dcacheReq.bits.cmd  := MemoryOpConstants.M_XRD
  }
  io.dcacheReq.bits.addr := s0_vaddr
  io.dcacheReq.bits.mask := s0_mask
  io.dcacheReq.bits.data := DontCare
  when(isSoftPrefetch) {
    io.dcacheReq.bits.instrtype := SOFT_PREFETCH.U
  }.otherwise {
    io.dcacheReq.bits.instrtype := LOAD_SOURCE.U
  }

  // TODO: update cache meta
  io.dcacheReq.bits.id   := DontCare

  val addrAligned = LookupTree(s0_uop.ctrl.fuOpType(1, 0), List(
    "b00".U   -> true.B,                   //b
    "b01".U   -> (s0_vaddr(0)    === 0.U), //h
    "b10".U   -> (s0_vaddr(1, 0) === 0.U), //w
    "b11".U   -> (s0_vaddr(2, 0) === 0.U)  //d
  ))
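  // fuOpType(1, 0) encodes the access size: b00 byte, b01 half, b10 word, b11 double.
  // A load is aligned when the vaddr bits below the access size are zero, e.g. an
  // lw (b10) requires vaddr(1, 0) === 0; misaligned loads raise loadAddrMisaligned below.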

  io.out.valid := io.in.valid && io.dcacheReq.ready

  io.out.bits := DontCare
  io.out.bits.vaddr := s0_vaddr
  io.out.bits.mask := s0_mask
  io.out.bits.uop := s0_uop
  io.out.bits.uop.cf.exceptionVec(loadAddrMisaligned) := !addrAligned
  io.out.bits.rsIdx := io.rsIdx
  io.out.bits.isFirstIssue := io.isFirstIssue
  io.out.bits.isSoftPrefetch := isSoftPrefetch

  io.in.ready := !io.in.valid || (io.out.ready && io.dcacheReq.ready)
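  // s0 accepts a new load only when both the next pipeline stage and the dcache
  // can take it in the same cycle; the two stall cases are counted separately by
  // the "stall_out" / "stall_dcache" perf counters below.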

  XSDebug(io.dcacheReq.fire(),
    p"[DCACHE LOAD REQ] pc ${Hexadecimal(s0_uop.cf.pc)}, vaddr ${Hexadecimal(s0_vaddr)}\n"
  )
  XSPerfAccumulate("in_valid", io.in.valid)
  XSPerfAccumulate("in_fire", io.in.fire)
  XSPerfAccumulate("in_fire_first_issue", io.in.valid && io.isFirstIssue)
  XSPerfAccumulate("stall_out", io.out.valid && !io.out.ready && io.dcacheReq.ready)
  XSPerfAccumulate("stall_dcache", io.out.valid && io.out.ready && !io.dcacheReq.ready)
  XSPerfAccumulate("addr_spec_success", io.out.fire() && s0_vaddr(VAddrBits-1, 12) === io.in.bits.src(0)(VAddrBits-1, 12))
  XSPerfAccumulate("addr_spec_failed", io.out.fire() && s0_vaddr(VAddrBits-1, 12) =/= io.in.bits.src(0)(VAddrBits-1, 12))
  XSPerfAccumulate("addr_spec_success_once", io.out.fire() && s0_vaddr(VAddrBits-1, 12) === io.in.bits.src(0)(VAddrBits-1, 12) && io.isFirstIssue)
  XSPerfAccumulate("addr_spec_failed_once", io.out.fire() && s0_vaddr(VAddrBits-1, 12) =/= io.in.bits.src(0)(VAddrBits-1, 12) && io.isFirstIssue)
}


// Load Pipeline Stage 1
// TLB resp (send paddr to dcache)
class LoadUnit_S1(implicit p: Parameters) extends XSModule {
  val io = IO(new Bundle() {
    val in = Flipped(Decoupled(new LsPipelineBundle))
    val out = Decoupled(new LsPipelineBundle)
    val dtlbResp = Flipped(DecoupledIO(new TlbResp))
    val dcachePAddr = Output(UInt(PAddrBits.W))
    val dcacheKill = Output(Bool())
    val dcacheBankConflict = Input(Bool())
    val fullForwardFast = Output(Bool())
    val sbuffer = new LoadForwardQueryIO
    val lsq = new PipeLoadForwardQueryIO
    val loadViolationQueryReq = Decoupled(new LoadViolationQueryReq)
    val rsFeedback = ValidIO(new RSFeedback)
    val csrCtrl = Flipped(new CustomCSRCtrlIO)
    val needLdVioCheckRedo = Output(Bool())
  })

  val s1_uop = io.in.bits.uop
  val s1_paddr = io.dtlbResp.bits.paddr
  // af & pf exceptions are modified below
  val s1_exception = ExceptionNO.selectByFu(io.out.bits.uop.cf.exceptionVec, lduCfg).asUInt.orR
  val s1_tlb_miss = io.dtlbResp.bits.miss
  val s1_mask = io.in.bits.mask
  val s1_bank_conflict = io.dcacheBankConflict

  io.out.bits := io.in.bits // forwardXX field will be updated in s1

  io.dtlbResp.ready := true.B

  io.dcachePAddr := s1_paddr
  //io.dcacheKill := s1_tlb_miss || s1_exception || s1_mmio
  io.dcacheKill := s1_tlb_miss || s1_exception
  // load forward query datapath
  io.sbuffer.valid := io.in.valid && !(s1_exception || s1_tlb_miss)
  io.sbuffer.vaddr := io.in.bits.vaddr
  io.sbuffer.paddr := s1_paddr
  io.sbuffer.uop := s1_uop
  io.sbuffer.sqIdx := s1_uop.sqIdx
  io.sbuffer.mask := s1_mask
  io.sbuffer.pc := s1_uop.cf.pc // FIXME: remove it

  io.lsq.valid := io.in.valid && !(s1_exception || s1_tlb_miss)
  io.lsq.vaddr := io.in.bits.vaddr
  io.lsq.paddr := s1_paddr
  io.lsq.uop := s1_uop
  io.lsq.sqIdx := s1_uop.sqIdx
  io.lsq.sqIdxMask := DontCare // will be overwritten by sqIdxMask pre-generated in s0
  io.lsq.mask := s1_mask
  io.lsq.pc := s1_uop.cf.pc // FIXME: remove it

  // ld-ld violation query
  io.loadViolationQueryReq.valid := io.in.valid && !(s1_exception || s1_tlb_miss)
  io.loadViolationQueryReq.bits.paddr := s1_paddr
  io.loadViolationQueryReq.bits.uop := s1_uop

  // Generate forwardMaskFast to wake up insts earlier
  val forwardMaskFast = io.lsq.forwardMaskFast.asUInt | io.sbuffer.forwardMaskFast.asUInt
  io.fullForwardFast := (~forwardMaskFast & s1_mask) === 0.U
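  // Worked example: for a 4-byte load, s1_mask = b00001111. If the combined
  // forwardMaskFast = b00000111, then ~forwardMaskFast & s1_mask = b00001000 =/= 0,
  // so the load is not fully forwarded and still needs dcache data.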

  // Generate feedback signal caused by:
  // * dcache bank conflict
  // * need redo ld-ld violation check
  val needLdVioCheckRedo = io.loadViolationQueryReq.valid &&
    !io.loadViolationQueryReq.ready &&
    RegNext(io.csrCtrl.ldld_vio_check_enable)
  io.needLdVioCheckRedo := needLdVioCheckRedo
  io.rsFeedback.valid := io.in.valid && (s1_bank_conflict || needLdVioCheckRedo)
  io.rsFeedback.bits.hit := false.B // report a miss: a bank conflict was detected, or the ld-ld violation check needs a redo
  io.rsFeedback.bits.rsIdx := io.in.bits.rsIdx
  io.rsFeedback.bits.flushState := io.in.bits.ptwBack
  io.rsFeedback.bits.sourceType := Mux(s1_bank_conflict, RSFeedbackType.bankConflict, RSFeedbackType.ldVioCheckRedo)
  io.rsFeedback.bits.dataInvalidSqIdx := DontCare

  // if replay is detected in load_s1,
  // load inst will be canceled immediately
  io.out.valid := io.in.valid && !io.rsFeedback.valid
  io.out.bits.paddr := s1_paddr
  io.out.bits.tlbMiss := s1_tlb_miss

  // the current ori test will trigger the ldest == 0 case; the code below will be modified in the future
  // af & pf exceptions are modified here
  io.out.bits.uop.cf.exceptionVec(loadPageFault) := io.dtlbResp.bits.excp.pf.ld
  io.out.bits.uop.cf.exceptionVec(loadAccessFault) := io.dtlbResp.bits.excp.af.ld

  io.out.bits.ptwBack := io.dtlbResp.bits.ptwBack
  io.out.bits.rsIdx := io.in.bits.rsIdx

  io.out.bits.isSoftPrefetch := io.in.bits.isSoftPrefetch

  io.in.ready := !io.in.valid || io.out.ready

  XSPerfAccumulate("in_valid", io.in.valid)
  XSPerfAccumulate("in_fire", io.in.fire)
  XSPerfAccumulate("in_fire_first_issue", io.in.fire && io.in.bits.isFirstIssue)
  XSPerfAccumulate("tlb_miss", io.in.fire && s1_tlb_miss)
  XSPerfAccumulate("tlb_miss_first_issue", io.in.fire && s1_tlb_miss && io.in.bits.isFirstIssue)
  XSPerfAccumulate("stall_out", io.out.valid && !io.out.ready)
}

// Load Pipeline Stage 2
// DCache resp
class LoadUnit_S2(implicit p: Parameters) extends XSModule with HasLoadHelper {
  val io = IO(new Bundle() {
    val in = Flipped(Decoupled(new LsPipelineBundle))
    val out = Decoupled(new LsPipelineBundle)
    val rsFeedback = ValidIO(new RSFeedback)
    val dcacheResp = Flipped(DecoupledIO(new DCacheWordResp))
    val pmpResp = Flipped(new PMPRespBundle())
    val lsq = new LoadForwardQueryIO
    val dataInvalidSqIdx = Input(UInt())
    val sbuffer = new LoadForwardQueryIO
    val dataForwarded = Output(Bool())
    val dcacheRequireReplay = Output(Bool())
    val fullForward = Output(Bool())
    val fastpath = Output(new LoadToLoadIO)
    val dcache_kill = Output(Bool())
    val delayedLoadError = Output(Bool())
    val loadViolationQueryResp = Flipped(Valid(new LoadViolationQueryResp))
    val csrCtrl = Flipped(new CustomCSRCtrlIO)
    val sentFastUop = Input(Bool())
    val static_pm = Input(Valid(Bool())) // valid for static, bits for mmio
  })

  val pmp = WireInit(io.pmpResp)
  when (io.static_pm.valid) {
    pmp.ld := false.B
    pmp.st := false.B
    pmp.instr := false.B
    pmp.mmio := io.static_pm.bits
  }
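  // When the tlb already knows the pmp/pma result statically (io.static_pm.valid),
  // the dynamic pmp response is overridden: the access is treated as legal and
  // only the mmio attribute (io.static_pm.bits) is kept.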

  val s2_is_prefetch = io.in.bits.isSoftPrefetch

  // exceptions that may cause the load addr to be invalid / illegal
  //
  // if such an exception happens, that inst and its exception info
  // will be forcibly written back to the rob
  val s2_exception_vec = WireInit(io.in.bits.uop.cf.exceptionVec)
  s2_exception_vec(loadAccessFault) := io.in.bits.uop.cf.exceptionVec(loadAccessFault) || pmp.ld
  // soft prefetch will not trigger any exception (but an ecc error interrupt may be triggered)
  when (s2_is_prefetch) {
    s2_exception_vec := 0.U.asTypeOf(s2_exception_vec.cloneType)
  }
  val s2_exception = ExceptionNO.selectByFu(s2_exception_vec, lduCfg).asUInt.orR

  // writeback access fault caused by ecc error / bus error
  //
  // * the ecc data error is slow to generate, so we do not use it until load stage 3
  // * in load stage 3, an extra signal io.load_error will be used to report it

  // for now a cache ecc error will raise an access fault;
  // at the same time, error info (including the error paddr) will be written to
  // a customized CSR "CACHE_ERROR"
  if (EnableAccurateLoadError) {
    io.delayedLoadError := io.dcacheResp.bits.error_delayed &&
      io.csrCtrl.cache_error_enable &&
      RegNext(io.out.valid)
  } else {
    io.delayedLoadError := false.B
  }

  val actually_mmio = pmp.mmio
  val s2_uop = io.in.bits.uop
  val s2_mask = io.in.bits.mask
  val s2_paddr = io.in.bits.paddr
  val s2_tlb_miss = io.in.bits.tlbMiss
  val s2_mmio = !s2_is_prefetch && actually_mmio && !s2_exception
  val s2_cache_miss = io.dcacheResp.bits.miss
  val s2_cache_replay = io.dcacheResp.bits.replay
  val s2_cache_tag_error = io.dcacheResp.bits.tag_error
  val s2_forward_fail = io.lsq.matchInvalid || io.sbuffer.matchInvalid
  val s2_ldld_violation = io.loadViolationQueryResp.valid &&
    io.loadViolationQueryResp.bits.have_violation &&
    RegNext(io.csrCtrl.ldld_vio_check_enable)
  val s2_data_invalid = io.lsq.dataInvalid && !s2_forward_fail && !s2_ldld_violation && !s2_exception

  io.dcache_kill := pmp.ld || pmp.mmio // move pmp resp kill to outside
  io.dcacheResp.ready := true.B
  val dcacheShouldResp = !(s2_tlb_miss || s2_exception || s2_mmio || s2_is_prefetch)
  assert(!(io.in.valid && (dcacheShouldResp && !io.dcacheResp.valid)), "DCache response got lost")

  // merge forward result
  // lsq has higher priority than sbuffer
  val forwardMask = Wire(Vec(8, Bool()))
  val forwardData = Wire(Vec(8, UInt(8.W)))

  val fullForward = (~forwardMask.asUInt & s2_mask) === 0.U && !io.lsq.dataInvalid
  io.lsq := DontCare
  io.sbuffer := DontCare
  io.fullForward := fullForward

  // generate XLEN/8 Muxes
  for (i <- 0 until XLEN / 8) {
    forwardMask(i) := io.lsq.forwardMask(i) || io.sbuffer.forwardMask(i)
    forwardData(i) := Mux(io.lsq.forwardMask(i), io.lsq.forwardData(i), io.sbuffer.forwardData(i))
  }
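  // Per-byte merge priority: an lsq-forwarded byte beats the sbuffer, and any
  // forwarded byte beats dcache data (merged in rdataVec below). E.g. if the lsq
  // forwards byte 0 and the sbuffer forwards bytes 0..1, byte 0 comes from the
  // lsq and byte 1 from the sbuffer.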

  XSDebug(io.out.fire(), "[FWD LOAD RESP] pc %x fwd %x(%b) + %x(%b)\n",
    s2_uop.cf.pc,
    io.lsq.forwardData.asUInt, io.lsq.forwardMask.asUInt,
    io.in.bits.forwardData.asUInt, io.in.bits.forwardMask.asUInt
  )

  // data merge
  val rdataVec = VecInit((0 until XLEN / 8).map(j =>
    Mux(forwardMask(j), forwardData(j), io.dcacheResp.bits.data(8*(j+1)-1, 8*j))))
  val rdata = rdataVec.asUInt
  val rdataSel = LookupTree(s2_paddr(2, 0), List(
    "b000".U -> rdata(63, 0),
    "b001".U -> rdata(63, 8),
    "b010".U -> rdata(63, 16),
    "b011".U -> rdata(63, 24),
    "b100".U -> rdata(63, 32),
    "b101".U -> rdata(63, 40),
    "b110".U -> rdata(63, 48),
    "b111".U -> rdata(63, 56)
  ))
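  // rdataSel right-shifts the merged 64-bit data by the byte offset paddr(2, 0);
  // e.g. a load at offset 2 selects rdata(63, 16). rdataHelper below then
  // sign-/zero-extends the result according to the uop's load type.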
  val rdataPartialLoad = rdataHelper(s2_uop, rdataSel)

  io.out.valid := io.in.valid && !s2_tlb_miss && !s2_data_invalid
  // Inst will be canceled in store queue / lsq,
  // so we do not need to care about flush in load / store unit's out.valid
  io.out.bits := io.in.bits
  io.out.bits.data := rdataPartialLoad
  // when an exception occurs, set it to not miss and let it write back to the rob (via the int port)
  if (EnableFastForward) {
    io.out.bits.miss := s2_cache_miss &&
      !s2_exception &&
      !s2_forward_fail &&
      !s2_ldld_violation &&
      !fullForward &&
      !s2_is_prefetch
  } else {
    io.out.bits.miss := s2_cache_miss &&
      !s2_exception &&
      !s2_forward_fail &&
      !s2_ldld_violation &&
      !s2_is_prefetch
  }
  io.out.bits.uop.ctrl.fpWen := io.in.bits.uop.ctrl.fpWen && !s2_exception
  // if forward fails, replay this inst from fetch
  val forwardFailReplay = s2_forward_fail && !s2_mmio && !s2_is_prefetch && !s2_tlb_miss
  // if a ld-ld violation is detected, replay this inst from fetch
  val ldldVioReplay = s2_ldld_violation && !s2_mmio && !s2_is_prefetch && !s2_tlb_miss
  val s2_need_replay_from_fetch = (s2_forward_fail || s2_ldld_violation) && !s2_mmio && !s2_is_prefetch && !s2_tlb_miss
  io.out.bits.uop.ctrl.replayInst := s2_need_replay_from_fetch
  io.out.bits.mmio := s2_mmio
  io.out.bits.uop.ctrl.flushPipe := s2_mmio && io.sentFastUop
  io.out.bits.uop.cf.exceptionVec := s2_exception_vec // cache error not included

  // For timing reasons, sometimes we can not let
  // io.out.bits.miss := s2_cache_miss && !s2_exception && !fullForward
  // We use io.dataForwarded instead. It means:
  // 1. The forwarding logic has prepared all the data needed,
  //    and the dcache query is no longer needed.
  // 2. ... or a data cache tag error is detected: this kind of inst
  //    will not update the miss queue. That is to say, if it misses,
  //    that inst may never be refilled.
  // Such insts will be written back from the load queue.
  io.dataForwarded := s2_cache_miss && !s2_exception && !s2_forward_fail &&
    (fullForward || io.csrCtrl.cache_error_enable && s2_cache_tag_error)
  // io.out.bits.forwardX will be sent to lq
  io.out.bits.forwardMask := forwardMask
  // data retrieved from dcache is also included in io.out.bits.forwardData
  io.out.bits.forwardData := rdataVec

  io.in.ready := io.out.ready || !io.in.valid

  // feedback tlb result to RS
  io.rsFeedback.valid := io.in.valid
  val s2_need_replay_from_rs = Wire(Bool())
  if (EnableFastForward) {
    s2_need_replay_from_rs :=
      s2_tlb_miss || // replay if dtlb miss
      s2_cache_replay && !s2_is_prefetch && !s2_forward_fail && !s2_ldld_violation && !s2_mmio && !s2_exception && !fullForward || // replay if dcache miss queue full / busy
      s2_data_invalid && !s2_is_prefetch && !s2_forward_fail && !s2_ldld_violation // replay if store to load forward data is not ready
  } else {
    // Note that if all parts of data are available in sq / sbuffer, replay required by dcache will not be scheduled
    s2_need_replay_from_rs :=
      s2_tlb_miss || // replay if dtlb miss
      s2_cache_replay && !s2_is_prefetch && !s2_forward_fail && !s2_ldld_violation && !s2_mmio && !s2_exception && !io.dataForwarded || // replay if dcache miss queue full / busy
      s2_data_invalid && !s2_is_prefetch && !s2_forward_fail && !s2_ldld_violation // replay if store to load forward data is not ready
  }
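  // In summary, s2 asks the rs to replay a load for one of three reasons:
  // a dtlb miss, a dcache replay (miss queue full / busy) with no usable forward
  // data, or store-to-load forward data that is not ready yet (dataInvalid).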
  assert(!RegNext(io.in.valid && s2_need_replay_from_rs && s2_need_replay_from_fetch))
  io.rsFeedback.bits.hit := !s2_need_replay_from_rs
  io.rsFeedback.bits.rsIdx := io.in.bits.rsIdx
  io.rsFeedback.bits.flushState := io.in.bits.ptwBack
  // feedback source priority: tlbMiss > dataInvalid > mshrFull
  // general case priority: tlbMiss > exception (include forward_fail / ldld_violation) > mmio > dataInvalid > mshrFull > normal miss / hit
  io.rsFeedback.bits.sourceType := Mux(s2_tlb_miss, RSFeedbackType.tlbMiss,
    Mux(s2_data_invalid,
      RSFeedbackType.dataInvalid,
      RSFeedbackType.mshrFull
    )
  )
  io.rsFeedback.bits.dataInvalidSqIdx.value := io.dataInvalidSqIdx
  io.rsFeedback.bits.dataInvalidSqIdx.flag := DontCare

  // s2_cache_replay is quite slow to generate, send it separately to LQ
  if (EnableFastForward) {
    io.dcacheRequireReplay := s2_cache_replay && !fullForward
  } else {
    io.dcacheRequireReplay := s2_cache_replay &&
      !io.rsFeedback.bits.hit &&
      !io.dataForwarded &&
      !s2_is_prefetch &&
      io.out.bits.miss
  }

  // fast load to load forward
  io.fastpath.valid := RegNext(io.out.valid) // for debug only
  io.fastpath.data := RegNext(io.out.bits.data)


  XSDebug(io.out.fire(), "[DCACHE LOAD RESP] pc %x rdata %x <- D$ %x + fwd %x(%b)\n",
    s2_uop.cf.pc, rdataPartialLoad, io.dcacheResp.bits.data,
    forwardData.asUInt, forwardMask.asUInt
  )

  XSPerfAccumulate("in_valid", io.in.valid)
  XSPerfAccumulate("in_fire", io.in.fire)
  XSPerfAccumulate("in_fire_first_issue", io.in.fire && io.in.bits.isFirstIssue)
  XSPerfAccumulate("dcache_miss", io.in.fire && s2_cache_miss)
  XSPerfAccumulate("dcache_miss_first_issue", io.in.fire && s2_cache_miss && io.in.bits.isFirstIssue)
  XSPerfAccumulate("full_forward", io.in.valid && fullForward)
  XSPerfAccumulate("dcache_miss_full_forward", io.in.valid && s2_cache_miss && fullForward)
  XSPerfAccumulate("replay",  io.rsFeedback.valid && !io.rsFeedback.bits.hit)
  XSPerfAccumulate("replay_tlb_miss", io.rsFeedback.valid && !io.rsFeedback.bits.hit && s2_tlb_miss)
  XSPerfAccumulate("replay_cache", io.rsFeedback.valid && !io.rsFeedback.bits.hit && !s2_tlb_miss && s2_cache_replay)
  XSPerfAccumulate("stall_out", io.out.valid && !io.out.ready)
  XSPerfAccumulate("replay_from_fetch_forward", io.out.valid && forwardFailReplay)
  XSPerfAccumulate("replay_from_fetch_load_vio", io.out.valid && ldldVioReplay)
}

class LoadUnit(implicit p: Parameters) extends XSModule
  with HasLoadHelper
  with HasPerfEvents
  with HasDCacheParameters
{
  val io = IO(new Bundle() {
    val ldin = Flipped(Decoupled(new ExuInput))
    val ldout = Decoupled(new ExuOutput)
    val redirect = Flipped(ValidIO(new Redirect))
    val feedbackSlow = ValidIO(new RSFeedback)
    val feedbackFast = ValidIO(new RSFeedback)
    val rsIdx = Input(UInt(log2Up(IssQueSize).W))
    val isFirstIssue = Input(Bool())
    val dcache = new DCacheLoadIO
    val sbuffer = new LoadForwardQueryIO
    val lsq = new LoadToLsqIO
    val refill = Flipped(ValidIO(new Refill))
    val fastUop = ValidIO(new MicroOp) // early wakeup signal generated in load_s1, sent to RS in load_s2
    val trigger = Vec(3, new LoadUnitTriggerIO)

    val tlb = new TlbRequestIO
    val pmp = Flipped(new PMPRespBundle()) // the pmp resp arrives in the same cycle as the tlb resp now

    val fastpathOut = Output(new LoadToLoadIO)
    val fastpathIn = Input(Vec(LoadPipelineWidth, new LoadToLoadIO))
    val loadFastMatch = Input(UInt(exuParameters.LduCnt.W))

    val delayedLoadError = Output(Bool()) // load ecc error
    // Note that io.delayedLoadError and io.lsq.delayedLoadError are different

    val csrCtrl = Flipped(new CustomCSRCtrlIO)
  })

  val load_s0 = Module(new LoadUnit_S0)
  val load_s1 = Module(new LoadUnit_S1)
  val load_s2 = Module(new LoadUnit_S2)

  load_s0.io.in <> io.ldin
  load_s0.io.dtlbReq <> io.tlb.req
  load_s0.io.dcacheReq <> io.dcache.req
  load_s0.io.rsIdx := io.rsIdx
  load_s0.io.isFirstIssue := io.isFirstIssue
  load_s0.io.fastpath := io.fastpathIn
  load_s0.io.loadFastMatch := io.loadFastMatch

  PipelineConnect(load_s0.io.out, load_s1.io.in, true.B, load_s0.io.out.bits.uop.robIdx.needFlush(io.redirect))

  load_s1.io.dtlbResp <> io.tlb.resp
  io.dcache.s1_paddr <> load_s1.io.dcachePAddr
  io.dcache.s1_kill <> load_s1.io.dcacheKill
  load_s1.io.sbuffer <> io.sbuffer
  load_s1.io.lsq <> io.lsq.forward
  load_s1.io.loadViolationQueryReq <> io.lsq.loadViolationQuery.req
  load_s1.io.dcacheBankConflict <> io.dcache.s1_bank_conflict
  load_s1.io.csrCtrl <> io.csrCtrl

  PipelineConnect(load_s1.io.out, load_s2.io.in, true.B, load_s1.io.out.bits.uop.robIdx.needFlush(io.redirect))

  io.dcache.s2_kill := load_s2.io.dcache_kill // to kill mmio resps that have been redirected
  load_s2.io.dcacheResp <> io.dcache.resp
  load_s2.io.pmpResp <> io.pmp
  load_s2.io.static_pm := RegNext(io.tlb.resp.bits.static_pm)
  load_s2.io.lsq.forwardData <> io.lsq.forward.forwardData
  load_s2.io.lsq.forwardMask <> io.lsq.forward.forwardMask
  load_s2.io.lsq.forwardMaskFast <> io.lsq.forward.forwardMaskFast // should not be used in load_s2
  load_s2.io.lsq.dataInvalid <> io.lsq.forward.dataInvalid
  load_s2.io.lsq.matchInvalid <> io.lsq.forward.matchInvalid
  load_s2.io.sbuffer.forwardData <> io.sbuffer.forwardData
  load_s2.io.sbuffer.forwardMask <> io.sbuffer.forwardMask
  load_s2.io.sbuffer.forwardMaskFast <> io.sbuffer.forwardMaskFast // should not be used in load_s2
  load_s2.io.sbuffer.dataInvalid <> io.sbuffer.dataInvalid // always false
  load_s2.io.sbuffer.matchInvalid <> io.sbuffer.matchInvalid
  load_s2.io.dataForwarded <> io.lsq.loadDataForwarded
  load_s2.io.fastpath <> io.fastpathOut
  load_s2.io.dataInvalidSqIdx := io.lsq.forward.dataInvalidSqIdx // provide dataInvalidSqIdx to make wakeup faster
  load_s2.io.loadViolationQueryResp <> io.lsq.loadViolationQuery.resp
  load_s2.io.csrCtrl <> io.csrCtrl
  load_s2.io.sentFastUop := io.fastUop.valid

  // actually load s3: these signals take effect one cycle after load s2
  io.lsq.dcacheRequireReplay := load_s2.io.dcacheRequireReplay
  io.lsq.delayedLoadError := load_s2.io.delayedLoadError

  // feedback tlb miss / dcache miss queue full
  io.feedbackSlow.valid := RegNext(load_s2.io.rsFeedback.valid && !load_s2.io.out.bits.uop.robIdx.needFlush(io.redirect))
  io.feedbackSlow.bits := RegNext(load_s2.io.rsFeedback.bits)
  val s3_replay_for_mshrfull = RegNext(!load_s2.io.rsFeedback.bits.hit && load_s2.io.rsFeedback.bits.sourceType === RSFeedbackType.mshrFull)
  val s3_refill_hit_load_paddr = refill_addr_hit(RegNext(load_s2.io.out.bits.paddr), io.refill.bits.addr)
  // update replay request
  io.feedbackSlow.bits.hit := RegNext(load_s2.io.rsFeedback.bits).hit ||
    s3_refill_hit_load_paddr && s3_replay_for_mshrfull

  // feedback bank conflict / ld-vio check struct hazard to rs
  io.feedbackFast.bits := RegNext(load_s1.io.rsFeedback.bits)
  io.feedbackFast.valid := RegNext(load_s1.io.rsFeedback.valid && !load_s1.io.out.bits.uop.robIdx.needFlush(io.redirect))
  // If replay is reported at load_s1, inst will be canceled (will not enter load_s2),
  // in that case:
  // * replay should not be reported twice
  assert(!(RegNext(io.feedbackFast.valid) && io.feedbackSlow.valid))
  // * io.fastUop.valid should not be reported
  assert(!RegNext(io.feedbackFast.valid && io.fastUop.valid))

  // pre-calculate the sqIdx mask in s0, then send it to lsq in s1 for forwarding
  val sqIdxMaskReg = RegNext(UIntToMask(load_s0.io.in.bits.uop.sqIdx.value, StoreQueueSize))
  io.lsq.forward.sqIdxMask := sqIdxMaskReg
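  // UIntToMask(sqIdx.value, StoreQueueSize) sets the bits below sqIdx.value;
  // combined with the sqIdx flag inside the lsq, this selects the stores older
  // than this load for forwarding without redoing the index comparison in s1.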

  // // use s2_hit_way to select data received in s1
  // load_s2.io.dcacheResp.bits.data := Mux1H(RegNext(io.dcache.s1_hit_way), RegNext(io.dcache.s1_data))
  // assert(load_s2.io.dcacheResp.bits.data === io.dcache.resp.bits.data)

  // now io.fastUop.valid is sent to RS in load_s2
  io.fastUop.valid := RegNext(
    io.dcache.s1_hit_way.orR && // dcache hit
    !io.dcache.s1_disable_fast_wakeup &&  // load fast wakeup should be disabled when dcache data read is not ready
    load_s1.io.in.valid && // valid load request
    !load_s1.io.dtlbResp.bits.fast_miss && // not mmio or tlb miss, pf / af not included here
    !io.lsq.forward.dataInvalidFast && // forward data is not invalid: store-to-load forward did not fail
    !load_s1.io.needLdVioCheckRedo // load-load violation check: load paddr cam struct hazard
  ) && !RegNext(load_s1.io.out.bits.uop.robIdx.needFlush(io.redirect))
  io.fastUop.bits := RegNext(load_s1.io.out.bits.uop)
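  // fastUop is a speculative early wakeup sent to the RS one cycle before the
  // load result is written back, so every term above must guarantee that the
  // load will indeed complete in s2 (dcache hit, no tlb miss, forward data
  // ready, no ld-ld check structural hazard). An mmio load that slipped through
  // is handled in s2 via io.sentFastUop (uop.ctrl.flushPipe).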

  XSDebug(load_s0.io.out.valid,
    p"S0: pc ${Hexadecimal(load_s0.io.out.bits.uop.cf.pc)}, lId ${Hexadecimal(load_s0.io.out.bits.uop.lqIdx.asUInt)}, " +
    p"vaddr ${Hexadecimal(load_s0.io.out.bits.vaddr)}, mask ${Hexadecimal(load_s0.io.out.bits.mask)}\n")
  XSDebug(load_s1.io.out.valid,
    p"S1: pc ${Hexadecimal(load_s1.io.out.bits.uop.cf.pc)}, lId ${Hexadecimal(load_s1.io.out.bits.uop.lqIdx.asUInt)}, tlb_miss ${io.tlb.resp.bits.miss}, " +
    p"paddr ${Hexadecimal(load_s1.io.out.bits.paddr)}, mmio ${load_s1.io.out.bits.mmio}\n")

  // writeback to LSQ
  // The current dcache uses MSHRs
  // Load queue will be updated at s2 for both hit/miss int/fp load
  io.lsq.loadIn.valid := load_s2.io.out.valid
  io.lsq.loadIn.bits := load_s2.io.out.bits

  // write to rob and writeback bus
  val s2_wb_valid = load_s2.io.out.valid && !load_s2.io.out.bits.miss && !load_s2.io.out.bits.mmio

  // Int load, if hit, will be written back at s2
  val hitLoadOut = Wire(Valid(new ExuOutput))
  hitLoadOut.valid := s2_wb_valid
  hitLoadOut.bits.uop := load_s2.io.out.bits.uop
  hitLoadOut.bits.data := load_s2.io.out.bits.data
  hitLoadOut.bits.redirectValid := false.B
  hitLoadOut.bits.redirect := DontCare
  hitLoadOut.bits.debug.isMMIO := load_s2.io.out.bits.mmio
  hitLoadOut.bits.debug.isPerfCnt := false.B
  hitLoadOut.bits.debug.paddr := load_s2.io.out.bits.paddr
  hitLoadOut.bits.debug.vaddr := load_s2.io.out.bits.vaddr
  hitLoadOut.bits.fflags := DontCare

  load_s2.io.out.ready := true.B

  val load_wb_reg = RegNext(Mux(hitLoadOut.valid, hitLoadOut.bits, io.lsq.ldout.bits))
  io.ldout.bits := load_wb_reg
  io.ldout.valid := RegNext(hitLoadOut.valid) && !RegNext(load_s2.io.out.bits.uop.robIdx.needFlush(io.redirect)) ||
    RegNext(io.lsq.ldout.valid) && !RegNext(io.lsq.ldout.bits.uop.robIdx.needFlush(io.redirect)) && !RegNext(hitLoadOut.valid)
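  // Writeback arbitration: a load that hits in s2 (hitLoadOut) always wins the
  // writeback port; loads written back from the load queue (io.lsq.ldout, e.g.
  // mmio or previously missed loads) are taken only when no s2 hit load is
  // present, which is also why io.lsq.ldout.ready is !hitLoadOut.valid below.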

  // io.ldout.bits.uop.cf.exceptionVec(loadAccessFault) := load_wb_reg.uop.cf.exceptionVec(loadAccessFault) ||
  //   hitLoadOut.valid && load_s2.io.delayedLoadError

  // io.delayedLoadError := false.B

  io.delayedLoadError := hitLoadOut.valid && load_s2.io.delayedLoadError

  io.lsq.ldout.ready := !hitLoadOut.valid

  when(io.feedbackSlow.valid && !io.feedbackSlow.bits.hit){
    // when a replay from rs is needed, the inst should not be written back to the rob
    assert(RegNext(!hitLoadOut.valid))
    // when a replay from rs is needed
    // * the inst should not be written back to the lq, or
    // * the lq state will be updated in load_s3 (next cycle)
    assert(RegNext(!io.lsq.loadIn.valid) || RegNext(load_s2.io.dcacheRequireReplay))
  }

  val lastValidData = RegEnable(io.ldout.bits.data, io.ldout.fire())
  val hitLoadAddrTriggerHitVec = Wire(Vec(3, Bool()))
  val lqLoadAddrTriggerHitVec = io.lsq.trigger.lqLoadAddrTriggerHitVec
  for (i <- 0 until 3) {
    val tdata2 = io.trigger(i).tdata2
    val matchType = io.trigger(i).matchType
    val tEnable = io.trigger(i).tEnable

    hitLoadAddrTriggerHitVec(i) := TriggerCmp(load_s2.io.out.bits.vaddr, tdata2, matchType, tEnable)
    io.trigger(i).addrHit := Mux(hitLoadOut.valid, hitLoadAddrTriggerHitVec(i), lqLoadAddrTriggerHitVec(i))
    io.trigger(i).lastDataHit := TriggerCmp(lastValidData, tdata2, matchType, tEnable)
  }
  io.lsq.trigger.hitLoadAddrTriggerHitVec := hitLoadAddrTriggerHitVec

  val perfEvents = Seq(
    ("load_s0_in_fire         ", load_s0.io.in.fire()                                                                                                            ),
    ("stall_dcache            ", load_s0.io.out.valid && load_s0.io.out.ready && !load_s0.io.dcacheReq.ready                                                     ),
    ("load_s1_in_fire         ", load_s1.io.in.fire                                                                                                              ),
    ("load_s1_tlb_miss        ", load_s1.io.in.fire && load_s1.io.dtlbResp.bits.miss                                                                             ),
    ("load_s2_in_fire         ", load_s2.io.in.fire                                                                                                              ),
    ("load_s2_dcache_miss     ", load_s2.io.in.fire && load_s2.io.dcacheResp.bits.miss                                                                           ),
    ("load_s2_replay          ", load_s2.io.rsFeedback.valid && !load_s2.io.rsFeedback.bits.hit                                                                  ),
    ("load_s2_replay_tlb_miss ", load_s2.io.rsFeedback.valid && !load_s2.io.rsFeedback.bits.hit && load_s2.io.in.bits.tlbMiss                                    ),
    ("load_s2_replay_cache    ", load_s2.io.rsFeedback.valid && !load_s2.io.rsFeedback.bits.hit && !load_s2.io.in.bits.tlbMiss && load_s2.io.dcacheResp.bits.miss),
  )
  generatePerfEvent()

  // Will cause timing problem:
  // ("load_to_load_forward    ", load_s0.io.loadFastMatch.orR && load_s0.io.in.fire()),

  when(io.ldout.fire()){
    XSDebug("ldout %x\n", io.ldout.bits.uop.cf.pc)
  }
}