xref: /XiangShan/src/main/scala/xiangshan/mem/pipeline/LoadUnit.scala (revision d87b76aa1c8b3309689888cbb9025cead93e6dd8)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import xiangshan._
import xiangshan.backend.decode.ImmUnion
import xiangshan.cache._
import xiangshan.cache.mmu.{TlbPtwIO, TlbReq, TlbResp, TlbCmd, TlbRequestIO, TLB}

class LoadToLsqIO(implicit p: Parameters) extends XSBundle {
  val loadIn = ValidIO(new LsPipelineBundle)
  val ldout = Flipped(DecoupledIO(new ExuOutput))
  val loadDataForwarded = Output(Bool())
  val needReplayFromRS = Output(Bool())
  val forward = new PipeLoadForwardQueryIO
}

class LoadToLoadIO(implicit p: Parameters) extends XSBundle {
  // the load-to-load fast path is limited to ld (64-bit loads) whose result is used as vaddr src1 only
  val data = UInt(XLEN.W)
  val valid = Bool()
}

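// How the load-to-load fast path works in this file: a producing load drives its
// raw merged data onto LoadUnit_S2.io.fastpath; a dependent load consumes that
// data in LoadUnit_S0 as its address base (selected by loadFastMatch), instead
// of waiting for the value to travel through writeback and an ALU.
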
// Load Pipeline Stage 0
// Generate addr, use addr to query DCache and DTLB
class LoadUnit_S0(implicit p: Parameters) extends XSModule with HasDCacheParameters {
  val io = IO(new Bundle() {
    val in = Flipped(Decoupled(new ExuInput))
    val out = Decoupled(new LsPipelineBundle)
    val fastpath = Input(Vec(LoadPipelineWidth, new LoadToLoadIO))
    val dtlbReq = DecoupledIO(new TlbReq)
    val dcacheReq = DecoupledIO(new DCacheWordReq)
    val rsIdx = Input(UInt(log2Up(IssQueSize).W))
    val isFirstIssue = Input(Bool())
    val loadFastMatch = Input(UInt(exuParameters.LduCnt.W))
  })
  require(LoadPipelineWidth == exuParameters.LduCnt)

  val s0_uop = io.in.bits.uop
  val imm12 = WireInit(s0_uop.ctrl.imm(11, 0))

  // slow vaddr from non-load insts
  val slowpath_vaddr = io.in.bits.src(0) + SignExt(imm12, VAddrBits)
  val slowpath_mask = genWmask(slowpath_vaddr, s0_uop.ctrl.fuOpType(1, 0))

  // fast vaddr from load insts
  val fastpath_vaddrs = WireInit(VecInit(List.tabulate(LoadPipelineWidth)(i => {
    io.fastpath(i).data + SignExt(imm12, VAddrBits)
  })))
  val fastpath_masks = WireInit(VecInit(List.tabulate(LoadPipelineWidth)(i => {
    genWmask(fastpath_vaddrs(i), s0_uop.ctrl.fuOpType(1, 0))
  })))
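  // io.loadFastMatch is expected to be one-hot or all-zero: each bit selects the
  // corresponding load pipe in io.fastpath, and Mux1H ors the selected entries
  // together, so the result is only meaningful under that one-hot assumption.
  // When no bit is set, s0_vaddr below falls back to the slow path, e.g.:
  //   loadFastMatch = "b10".U -> use fastpath_vaddrs(1)
  //   loadFastMatch = "b00".U -> use slowpath_vaddr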
  val fastpath_vaddr = Mux1H(io.loadFastMatch, fastpath_vaddrs)
  val fastpath_mask  = Mux1H(io.loadFastMatch, fastpath_masks)

  // select vaddr between the two adders above: fast path (load-to-load data) vs. slow path (src1 + imm)
  val s0_vaddr = Mux(io.loadFastMatch.orR, fastpath_vaddr, slowpath_vaddr)
  val s0_mask  = Mux(io.loadFastMatch.orR, fastpath_mask, slowpath_mask)
  XSPerfAccumulate("load_to_load_forward", io.loadFastMatch.orR && io.in.fire())

  // an ORI that reaches the load unit is a software prefetch hint
  val isSoftPrefetch = s0_uop.ctrl.isORI
  val isSoftPrefetchRead = s0_uop.ctrl.isSoftPrefetchRead
  val isSoftPrefetchWrite = s0_uop.ctrl.isSoftPrefetchWrite

  // query DTLB
  io.dtlbReq.valid := io.in.valid
  io.dtlbReq.bits.vaddr := s0_vaddr
  io.dtlbReq.bits.cmd := TlbCmd.read
  io.dtlbReq.bits.robIdx := s0_uop.robIdx
  io.dtlbReq.bits.debug.pc := s0_uop.cf.pc
  io.dtlbReq.bits.debug.isFirstIssue := io.isFirstIssue

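  // The DTLB and the DCache are both queried with the virtual address in s0;
  // the translated physical address is handed to the DCache in s1
  // (LoadUnit_S1.io.dcachePAddr), overlapping translation with cache access.
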
  // query DCache
  io.dcacheReq.valid := io.in.valid
  when (isSoftPrefetchRead) {
    io.dcacheReq.bits.cmd  := MemoryOpConstants.M_PFR
  }.elsewhen (isSoftPrefetchWrite) {
    io.dcacheReq.bits.cmd  := MemoryOpConstants.M_PFW
  }.otherwise {
    io.dcacheReq.bits.cmd  := MemoryOpConstants.M_XRD
  }
  io.dcacheReq.bits.addr := s0_vaddr
  io.dcacheReq.bits.mask := s0_mask
  io.dcacheReq.bits.data := DontCare
  when(isSoftPrefetch) {
    io.dcacheReq.bits.instrtype := SOFT_PREFETCH.U
  }.otherwise {
    io.dcacheReq.bits.instrtype := LOAD_SOURCE.U
  }

  // TODO: update cache meta
  io.dcacheReq.bits.id   := DontCare

  val addrAligned = LookupTree(s0_uop.ctrl.fuOpType(1, 0), List(
    "b00".U   -> true.B,                   // byte
    "b01".U   -> (s0_vaddr(0)    === 0.U), // half
    "b10".U   -> (s0_vaddr(1, 0) === 0.U), // word
    "b11".U   -> (s0_vaddr(2, 0) === 0.U)  // double
  ))

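  // fuOpType(1, 0) encodes log2 of the access size, so a 2^n-byte access is
  // aligned iff the low n address bits are zero. A misaligned access raises
  // loadAddrMisaligned below instead of going to memory.
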
  io.out.valid := io.in.valid && io.dcacheReq.ready

  io.out.bits := DontCare
  io.out.bits.vaddr := s0_vaddr
  io.out.bits.mask := s0_mask
  io.out.bits.uop := s0_uop
  io.out.bits.uop.cf.exceptionVec(loadAddrMisaligned) := !addrAligned
  io.out.bits.rsIdx := io.rsIdx
  io.out.bits.isFirstIssue := io.isFirstIssue
  io.out.bits.isSoftPrefetch := isSoftPrefetch

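  // accept a new request only when none is being presented, or when both the
  // next stage and the DCache can take the current one this cycle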
  io.in.ready := !io.in.valid || (io.out.ready && io.dcacheReq.ready)

  XSDebug(io.dcacheReq.fire(),
    p"[DCACHE LOAD REQ] pc ${Hexadecimal(s0_uop.cf.pc)}, vaddr ${Hexadecimal(s0_vaddr)}\n"
  )
  XSPerfAccumulate("in_valid", io.in.valid)
  XSPerfAccumulate("in_fire", io.in.fire)
  XSPerfAccumulate("in_fire_first_issue", io.in.valid && io.isFirstIssue)
  XSPerfAccumulate("stall_out", io.out.valid && !io.out.ready && io.dcacheReq.ready)
  XSPerfAccumulate("stall_dcache", io.out.valid && io.out.ready && !io.dcacheReq.ready)
  XSPerfAccumulate("addr_spec_success", io.out.fire() && s0_vaddr(VAddrBits-1, 12) === io.in.bits.src(0)(VAddrBits-1, 12))
  XSPerfAccumulate("addr_spec_failed", io.out.fire() && s0_vaddr(VAddrBits-1, 12) =/= io.in.bits.src(0)(VAddrBits-1, 12))
  XSPerfAccumulate("addr_spec_success_once", io.out.fire() && s0_vaddr(VAddrBits-1, 12) === io.in.bits.src(0)(VAddrBits-1, 12) && io.isFirstIssue)
  XSPerfAccumulate("addr_spec_failed_once", io.out.fire() && s0_vaddr(VAddrBits-1, 12) =/= io.in.bits.src(0)(VAddrBits-1, 12) && io.isFirstIssue)
}


// Load Pipeline Stage 1
// TLB resp (send paddr to dcache)
class LoadUnit_S1(implicit p: Parameters) extends XSModule {
  val io = IO(new Bundle() {
    val in = Flipped(Decoupled(new LsPipelineBundle))
    val out = Decoupled(new LsPipelineBundle)
    val dtlbResp = Flipped(DecoupledIO(new TlbResp))
    val dcachePAddr = Output(UInt(PAddrBits.W))
    val dcacheKill = Output(Bool())
    val dcacheBankConflict = Input(Bool())
    val fullForwardFast = Output(Bool())
    val sbuffer = new LoadForwardQueryIO
    val lsq = new PipeLoadForwardQueryIO
    val rsFeedback = ValidIO(new RSFeedback)
  })

  val isSoftPrefetch = io.in.bits.isSoftPrefetch
  val actually_except = io.dtlbResp.bits.excp.pf.ld || io.dtlbResp.bits.excp.af.ld || io.out.bits.uop.cf.exceptionVec(loadAddrMisaligned)
  val actually_mmio = !io.dtlbResp.bits.miss && io.dtlbResp.bits.mmio

  val softprefetch_mmio = isSoftPrefetch && actually_mmio     // TODO: fix it
  val softprefetch_except = isSoftPrefetch && actually_except // TODO: fix it

  val s1_uop = io.in.bits.uop
  val s1_paddr = io.dtlbResp.bits.paddr
  val s1_exception = selectLoad(io.out.bits.uop.cf.exceptionVec, false).asUInt.orR // pf & af exceptions are overwritten below
  val s1_tlb_miss = io.dtlbResp.bits.miss
  val s1_mmio = !isSoftPrefetch && actually_mmio
  val s1_mask = io.in.bits.mask
  val s1_bank_conflict = io.dcacheBankConflict

  io.out.bits := io.in.bits // forwardXX fields will be updated in s1

  io.dtlbResp.ready := true.B

  // TODO: PMA check
  io.dcachePAddr := s1_paddr
  io.dcacheKill := s1_tlb_miss || actually_mmio || actually_except

  // load forward query datapath
  io.sbuffer.valid := io.in.valid && !(s1_exception || s1_tlb_miss)
  io.sbuffer.vaddr := io.in.bits.vaddr
  io.sbuffer.paddr := s1_paddr
  io.sbuffer.uop := s1_uop
  io.sbuffer.sqIdx := s1_uop.sqIdx
  io.sbuffer.mask := s1_mask
  io.sbuffer.pc := s1_uop.cf.pc // FIXME: remove it

  io.lsq.valid := io.in.valid && !(s1_exception || s1_tlb_miss)
  io.lsq.vaddr := io.in.bits.vaddr
  io.lsq.paddr := s1_paddr
  io.lsq.uop := s1_uop
  io.lsq.sqIdx := s1_uop.sqIdx
  io.lsq.sqIdxMask := DontCare // will be overwritten by the sqIdxMask pre-generated in s0
  io.lsq.mask := s1_mask
  io.lsq.pc := s1_uop.cf.pc // FIXME: remove it

  // Generate forwardMaskFast to wake up insts earlier
  val forwardMaskFast = io.lsq.forwardMaskFast.asUInt | io.sbuffer.forwardMaskFast.asUInt
  io.fullForwardFast := (~forwardMaskFast & s1_mask) === 0.U
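  // fullForwardFast is true when every byte requested by s1_mask can already be
  // forwarded from the store queue or the sbuffer, so the load does not need to
  // wait for the DCache and its consumers can be woken up early.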

  // Generate feedback signal caused by dcache bank conflict
  io.rsFeedback.valid := io.in.valid && s1_bank_conflict
  io.rsFeedback.bits.hit := false.B // we have found s1_bank_conflict
  io.rsFeedback.bits.rsIdx := io.in.bits.rsIdx
  io.rsFeedback.bits.flushState := io.in.bits.ptwBack
  io.rsFeedback.bits.sourceType := RSFeedbackType.bankConflict
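  // A bank-conflicted load cannot read data this cycle: io.out.valid below is
  // deasserted so the load is dropped from the pipeline, and the hit = false
  // fast feedback above makes the reservation station reschedule (replay) it.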

  io.out.valid := io.in.valid && !s1_bank_conflict // on a bank conflict, the load is canceled immediately
  io.out.bits.paddr := s1_paddr
  io.out.bits.mmio := s1_mmio && !s1_exception
  io.out.bits.tlbMiss := s1_tlb_miss

  // the current ori (software prefetch) test may produce ldest == 0; this will be modified in the future
  // pf & af exceptions are suppressed for software prefetches
  io.out.bits.uop.cf.exceptionVec(loadPageFault) := !isSoftPrefetch && io.dtlbResp.bits.excp.pf.ld
  io.out.bits.uop.cf.exceptionVec(loadAccessFault) := !isSoftPrefetch && io.dtlbResp.bits.excp.af.ld

  io.out.bits.ptwBack := io.dtlbResp.bits.ptwBack
  io.out.bits.rsIdx := io.in.bits.rsIdx

  // soft prefetch stuff
  io.out.bits.isSoftPrefetch := io.in.bits.isSoftPrefetch
  io.out.bits.isSoftPreExcept := softprefetch_except
  io.out.bits.isSoftPremmio := softprefetch_mmio

  io.in.ready := !io.in.valid || io.out.ready

  XSPerfAccumulate("in_valid", io.in.valid)
  XSPerfAccumulate("in_fire", io.in.fire)
  XSPerfAccumulate("in_fire_first_issue", io.in.fire && io.in.bits.isFirstIssue)
  XSPerfAccumulate("tlb_miss", io.in.fire && s1_tlb_miss)
  XSPerfAccumulate("tlb_miss_first_issue", io.in.fire && s1_tlb_miss && io.in.bits.isFirstIssue)
  XSPerfAccumulate("stall_out", io.out.valid && !io.out.ready)
}

// Load Pipeline Stage 2
// DCache resp
class LoadUnit_S2(implicit p: Parameters) extends XSModule with HasLoadHelper {
  val io = IO(new Bundle() {
    val in = Flipped(Decoupled(new LsPipelineBundle))
    val out = Decoupled(new LsPipelineBundle)
    val rsFeedback = ValidIO(new RSFeedback)
    val dcacheResp = Flipped(DecoupledIO(new DCacheWordResp))
    val lsq = new LoadForwardQueryIO
    val sbuffer = new LoadForwardQueryIO
    val dataForwarded = Output(Bool())
    val needReplayFromRS = Output(Bool())
    val fastpath = Output(new LoadToLoadIO)
  })

262
263  val s2_uop = io.in.bits.uop
264  val s2_mask = io.in.bits.mask
265  val s2_paddr = io.in.bits.paddr
266  val s2_tlb_miss = io.in.bits.tlbMiss
267  val s2_data_invalid = io.lsq.dataInvalid
268  val s2_exception = selectLoad(io.in.bits.uop.cf.exceptionVec, false).asUInt.orR
269  val s2_mmio = io.in.bits.mmio && !s2_exception
270  val s2_cache_miss = io.dcacheResp.bits.miss
271  val s2_cache_replay = io.dcacheResp.bits.replay
272
273  val s2_cache_miss_enter = io.dcacheResp.bits.miss_enter //missReq enter the mshr successfully
274  val isSoftPreExcept = io.in.bits.isSoftPreExcept
275  val isSoftPremmio = io.in.bits.isSoftPremmio
276  // val cnt = RegInit(127.U)
277  // cnt := cnt + io.in.valid.asUInt
278  // val s2_forward_fail = io.lsq.matchInvalid || io.sbuffer.matchInvalid || cnt === 0.U
279
280  val s2_forward_fail = io.lsq.matchInvalid || io.sbuffer.matchInvalid
281
282  // assert(!s2_forward_fail)
283
  io.dcacheResp.ready := true.B
  val dcacheShouldResp = !(s2_tlb_miss || s2_exception || s2_mmio)
  assert(!(io.in.valid && (dcacheShouldResp && !io.dcacheResp.valid) && (!isSoftPreExcept) && (!isSoftPremmio)), "DCache response got lost")

  // merge forward results
  // lsq has higher priority than sbuffer: the sbuffer only holds stores that
  // have already committed, so any matching store-queue entry is younger and
  // its data must win
  val forwardMask = Wire(Vec(8, Bool()))
  val forwardData = Wire(Vec(8, UInt(8.W)))

  val fullForward = (~forwardMask.asUInt & s2_mask) === 0.U && !io.lsq.dataInvalid
  io.lsq := DontCare
  io.sbuffer := DontCare

  // generate XLEN/8 byte-granular muxes
  for (i <- 0 until XLEN / 8) {
    forwardMask(i) := io.lsq.forwardMask(i) || io.sbuffer.forwardMask(i)
    forwardData(i) := Mux(io.lsq.forwardMask(i), io.lsq.forwardData(i), io.sbuffer.forwardData(i))
  }

  XSDebug(io.out.fire(), "[FWD LOAD RESP] pc %x fwd %x(%b) + %x(%b)\n",
    s2_uop.cf.pc,
    io.lsq.forwardData.asUInt, io.lsq.forwardMask.asUInt,
    io.in.bits.forwardData.asUInt, io.in.bits.forwardMask.asUInt
  )

  // data merge
  val rdataVec = VecInit((0 until XLEN / 8).map(j =>
    Mux(forwardMask(j), forwardData(j), io.dcacheResp.bits.data(8*(j+1)-1, 8*j))))
  val rdata = rdataVec.asUInt
  val rdataSel = LookupTree(s2_paddr(2, 0), List(
    "b000".U -> rdata(63, 0),
    "b001".U -> rdata(63, 8),
    "b010".U -> rdata(63, 16),
    "b011".U -> rdata(63, 24),
    "b100".U -> rdata(63, 32),
    "b101".U -> rdata(63, 40),
    "b110".U -> rdata(63, 48),
    "b111".U -> rdata(63, 56)
  ))
  val rdataPartialLoad = rdataHelper(s2_uop, rdataSel)
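  // rdataSel aligns the addressed byte to bit 0: for byte offset k within the
  // 64-bit lane it keeps rdata(63, 8*k), i.e. a logical right shift by 8*k bits
  // (functionally rdata >> (s2_paddr(2, 0) << 3)); rdataHelper then applies the
  // width- and sign-dependent extension for the specific load opcode.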

  io.out.valid := io.in.valid && !s2_tlb_miss && !s2_data_invalid
  // Inst will be canceled in store queue / lsq,
  // so we do not need to care about flush in load / store unit's out.valid
  io.out.bits := io.in.bits
  io.out.bits.data := rdataPartialLoad
  // when an exception occurs, mark the load as not-miss and let it write back to rob (via int port)
  if (EnableFastForward) {
    when(io.in.bits.isSoftPrefetch) {
      io.out.bits.miss := s2_cache_miss && !s2_exception && !s2_forward_fail && !fullForward && !s2_cache_miss_enter && !isSoftPreExcept && !isSoftPremmio
    }.otherwise {
      io.out.bits.miss := s2_cache_miss && !s2_exception && !s2_forward_fail && !fullForward
    }
  } else {
    when(io.in.bits.isSoftPrefetch) {
      io.out.bits.miss := s2_cache_miss && !s2_exception && !s2_forward_fail && !s2_cache_miss_enter && !isSoftPreExcept && !isSoftPremmio
    }.otherwise {
      io.out.bits.miss := s2_cache_miss && !s2_exception && !s2_forward_fail
    }
  }
  io.out.bits.uop.ctrl.fpWen := io.in.bits.uop.ctrl.fpWen && !s2_exception
  // if forwarding failed, replay this inst
  io.out.bits.uop.ctrl.replayInst := s2_forward_fail && !s2_mmio
  io.out.bits.mmio := s2_mmio

  // For timing reasons, sometimes we can not let
  // io.out.bits.miss := s2_cache_miss && !s2_exception && !fullForward
  // We use io.dataForwarded instead. It means the forward logic has prepared all the data needed,
  // and the dcache query is no longer needed.
  // Such insts will be written back from the load queue.
  io.dataForwarded := s2_cache_miss && fullForward && !s2_exception && !s2_forward_fail
  // io.out.bits.forwardX will be sent to lq
  io.out.bits.forwardMask := forwardMask
  // data retrieved from the dcache is also included in io.out.bits.forwardData
  io.out.bits.forwardData := rdataVec

  io.in.ready := io.out.ready || !io.in.valid

  // feedback tlb result to RS
  io.rsFeedback.valid := io.in.valid
  when (io.in.bits.isSoftPrefetch) {
    io.rsFeedback.bits.hit := (!s2_tlb_miss && (!s2_cache_replay || s2_mmio || s2_exception || fullForward) && !s2_data_invalid) || s2_cache_miss_enter || isSoftPreExcept || isSoftPremmio
  }.otherwise {
    io.rsFeedback.bits.hit := !s2_tlb_miss && (!s2_cache_replay || s2_mmio || s2_exception || fullForward) && !s2_data_invalid
  }
  io.rsFeedback.bits.rsIdx := io.in.bits.rsIdx
  io.rsFeedback.bits.flushState := io.in.bits.ptwBack
  io.rsFeedback.bits.sourceType := Mux(s2_tlb_miss, RSFeedbackType.tlbMiss,
    Mux(io.lsq.dataInvalid,
      RSFeedbackType.dataInvalid,
      RSFeedbackType.mshrFull
    )
  )
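  // hit = true tells the reservation station it can release the entry;
  // hit = false requests a replay, with sourceType indicating whether the cause
  // was a TLB miss, store-to-load forward data not yet ready, or a full miss queue.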

  // s2_cache_replay is quite slow to generate, so it is sent to the LQ separately
  io.needReplayFromRS := s2_cache_replay && !fullForward

  // fast load-to-load forwarding
  io.fastpath.valid := io.in.valid // for debug only
  io.fastpath.data := rdata // raw, unextended data

  XSDebug(io.out.fire(), "[DCACHE LOAD RESP] pc %x rdata %x <- D$ %x + fwd %x(%b)\n",
    s2_uop.cf.pc, rdataPartialLoad, io.dcacheResp.bits.data,
    forwardData.asUInt, forwardMask.asUInt
  )

  XSPerfAccumulate("in_valid", io.in.valid)
  XSPerfAccumulate("in_fire", io.in.fire)
  XSPerfAccumulate("in_fire_first_issue", io.in.fire && io.in.bits.isFirstIssue)
  XSPerfAccumulate("dcache_miss", io.in.fire && s2_cache_miss)
  XSPerfAccumulate("dcache_miss_first_issue", io.in.fire && s2_cache_miss && io.in.bits.isFirstIssue)
  XSPerfAccumulate("full_forward", io.in.valid && fullForward)
  XSPerfAccumulate("dcache_miss_full_forward", io.in.valid && s2_cache_miss && fullForward)
  XSPerfAccumulate("replay", io.rsFeedback.valid && !io.rsFeedback.bits.hit)
  XSPerfAccumulate("replay_tlb_miss", io.rsFeedback.valid && !io.rsFeedback.bits.hit && s2_tlb_miss)
  XSPerfAccumulate("replay_cache", io.rsFeedback.valid && !io.rsFeedback.bits.hit && !s2_tlb_miss && s2_cache_replay)
  XSPerfAccumulate("stall_out", io.out.valid && !io.out.ready)
}

class LoadUnit(implicit p: Parameters) extends XSModule with HasLoadHelper {
  val io = IO(new Bundle() {
    val ldin = Flipped(Decoupled(new ExuInput))
    val ldout = Decoupled(new ExuOutput)
    val redirect = Flipped(ValidIO(new Redirect))
    val flush = Input(Bool())
    val feedbackSlow = ValidIO(new RSFeedback)
    val feedbackFast = ValidIO(new RSFeedback)
    val rsIdx = Input(UInt(log2Up(IssQueSize).W))
    val isFirstIssue = Input(Bool())
    val dcache = new DCacheLoadIO
    val sbuffer = new LoadForwardQueryIO
    val lsq = new LoadToLsqIO
    val fastUop = ValidIO(new MicroOp) // early wakeup signal generated in load_s1

    val tlb = new TlbRequestIO
    val fastpathOut = Output(new LoadToLoadIO)
    val fastpathIn = Input(Vec(LoadPipelineWidth, new LoadToLoadIO))
    val loadFastMatch = Input(UInt(exuParameters.LduCnt.W))
  })

  val load_s0 = Module(new LoadUnit_S0)
  val load_s1 = Module(new LoadUnit_S1)
  val load_s2 = Module(new LoadUnit_S2)

  load_s0.io.in <> io.ldin
  load_s0.io.dtlbReq <> io.tlb.req
  load_s0.io.dcacheReq <> io.dcache.req
  load_s0.io.rsIdx := io.rsIdx
  load_s0.io.isFirstIssue := io.isFirstIssue
  load_s0.io.fastpath := io.fastpathIn
  load_s0.io.loadFastMatch := io.loadFastMatch

  PipelineConnect(load_s0.io.out, load_s1.io.in, true.B, load_s0.io.out.bits.uop.robIdx.needFlush(io.redirect, io.flush))
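  // PipelineConnect inserts the s0/s1 pipeline register: load_s1.io.in captures
  // load_s0.io.out on a fired handshake and is flushed when the in-flight uop is
  // squashed by a redirect (same for the s1/s2 connection below).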

  load_s1.io.dtlbResp <> io.tlb.resp
  io.dcache.s1_paddr <> load_s1.io.dcachePAddr
  io.dcache.s1_kill <> load_s1.io.dcacheKill
  load_s1.io.sbuffer <> io.sbuffer
  load_s1.io.lsq <> io.lsq.forward
  load_s1.io.dcacheBankConflict <> io.dcache.s1_bank_conflict

  PipelineConnect(load_s1.io.out, load_s2.io.in, true.B, load_s1.io.out.bits.uop.robIdx.needFlush(io.redirect, io.flush))

  load_s2.io.dcacheResp <> io.dcache.resp
  load_s2.io.lsq.forwardData <> io.lsq.forward.forwardData
  load_s2.io.lsq.forwardMask <> io.lsq.forward.forwardMask
  load_s2.io.lsq.forwardMaskFast <> io.lsq.forward.forwardMaskFast // should not be used in load_s2
  load_s2.io.lsq.dataInvalid <> io.lsq.forward.dataInvalid
  load_s2.io.lsq.matchInvalid <> io.lsq.forward.matchInvalid
  load_s2.io.sbuffer.forwardData <> io.sbuffer.forwardData
  load_s2.io.sbuffer.forwardMask <> io.sbuffer.forwardMask
  load_s2.io.sbuffer.forwardMaskFast <> io.sbuffer.forwardMaskFast // should not be used in load_s2
  load_s2.io.sbuffer.dataInvalid <> io.sbuffer.dataInvalid // always false
  load_s2.io.sbuffer.matchInvalid <> io.sbuffer.matchInvalid
  load_s2.io.dataForwarded <> io.lsq.loadDataForwarded
  load_s2.io.fastpath <> io.fastpathOut
  io.lsq.needReplayFromRS := load_s2.io.needReplayFromRS

  // feedback tlb miss / dcache miss queue full
  io.feedbackSlow.bits := RegNext(load_s2.io.rsFeedback.bits)
  io.feedbackSlow.valid := RegNext(load_s2.io.rsFeedback.valid && !load_s2.io.out.bits.uop.robIdx.needFlush(io.redirect, io.flush))

  // feedback bank conflict to rs
  io.feedbackFast.bits := load_s1.io.rsFeedback.bits
  io.feedbackFast.valid := load_s1.io.rsFeedback.valid
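  // A load never produces both feedbacks: a bank-conflicted load is killed in s1
  // (load_s1.io.out.valid is deasserted), so it cannot reach s2 and generate the
  // slow feedback two cycles after the fast one; the assertion below checks this.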
  assert(!(RegNext(RegNext(io.feedbackFast.valid)) && io.feedbackSlow.valid))

  // pre-calculate the sqIdx mask in s0, then send it to the lsq in s1 for forwarding
  val sqIdxMaskReg = RegNext(UIntToMask(load_s0.io.in.bits.uop.sqIdx.value, StoreQueueSize))
  io.lsq.forward.sqIdxMask := sqIdxMaskReg
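  // UIntToMask(v, n) (see utils) produces an n-bit mask with the bits below v
  // set; the store queue uses it to restrict its forwarding search to entries
  // in front of this load's sqIdx.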

  // // use s2_hit_way to select data received in s1
  // load_s2.io.dcacheResp.bits.data := Mux1H(RegNext(io.dcache.s1_hit_way), RegNext(io.dcache.s1_data))
  // assert(load_s2.io.dcacheResp.bits.data === io.dcache.resp.bits.data)

  io.fastUop.valid := io.dcache.s1_hit_way.orR && // dcache hit
    !io.dcache.s1_disable_fast_wakeup && // load fast wakeup should be disabled when dcache data read is not ready
    load_s1.io.in.valid && // valid load request
    !load_s1.io.dcacheKill && // no tlb miss, mmio access or exception
    !io.lsq.forward.dataInvalidFast // forward failed
  io.fastUop.bits := load_s1.io.out.bits.uop
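  // io.fastUop wakes up this load's consumers from s1, one cycle before the data
  // is actually produced in s2; the conditions above make that speculation safe
  // by requiring a dcache way hit and no kill or forward hazard.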

  XSDebug(load_s0.io.out.valid,
    p"S0: pc ${Hexadecimal(load_s0.io.out.bits.uop.cf.pc)}, lId ${Hexadecimal(load_s0.io.out.bits.uop.lqIdx.asUInt)}, " +
    p"vaddr ${Hexadecimal(load_s0.io.out.bits.vaddr)}, mask ${Hexadecimal(load_s0.io.out.bits.mask)}\n")
  XSDebug(load_s1.io.out.valid,
    p"S1: pc ${Hexadecimal(load_s1.io.out.bits.uop.cf.pc)}, lId ${Hexadecimal(load_s1.io.out.bits.uop.lqIdx.asUInt)}, tlb_miss ${io.tlb.resp.bits.miss}, " +
    p"paddr ${Hexadecimal(load_s1.io.out.bits.paddr)}, mmio ${load_s1.io.out.bits.mmio}\n")

  // writeback to LSQ
  // The current dcache uses MSHRs.
  // The load queue is updated at s2 for both hit and miss, int and fp loads.
  io.lsq.loadIn.valid := load_s2.io.out.valid
  io.lsq.loadIn.bits := load_s2.io.out.bits

  // write to rob and writeback bus
  val s2_wb_valid = load_s2.io.out.valid && !load_s2.io.out.bits.miss && !load_s2.io.out.bits.mmio

  // an int load that hits is written back at s2
  val hitLoadOut = Wire(Valid(new ExuOutput))
  hitLoadOut.valid := s2_wb_valid
  hitLoadOut.bits.uop := load_s2.io.out.bits.uop
  hitLoadOut.bits.data := load_s2.io.out.bits.data
  hitLoadOut.bits.redirectValid := false.B
  hitLoadOut.bits.redirect := DontCare
  hitLoadOut.bits.debug.isMMIO := load_s2.io.out.bits.mmio
  hitLoadOut.bits.debug.isPerfCnt := false.B
  hitLoadOut.bits.debug.paddr := load_s2.io.out.bits.paddr
  hitLoadOut.bits.fflags := DontCare

  load_s2.io.out.ready := true.B

  io.ldout.bits := Mux(hitLoadOut.valid, hitLoadOut.bits, io.lsq.ldout.bits)
  io.ldout.valid := hitLoadOut.valid || io.lsq.ldout.valid

  io.lsq.ldout.ready := !hitLoadOut.valid
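
  // Writeback arbitration: loads that hit write back directly from s2; missed
  // or uncached loads write back later from the load queue via io.lsq.ldout.
  // The s2 path has strict priority, so lsq.ldout is stalled whenever a hit
  // load is writing back in the same cycle.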

  when (io.ldout.fire()) {
    XSDebug("ldout %x\n", io.ldout.bits.uop.cf.pc)
  }
}