/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import xiangshan.ExceptionNO._
import xiangshan._
import xiangshan.backend.fu.PMPRespBundle
import xiangshan.cache._
import xiangshan.cache.mmu.{TlbCmd, TlbReq, TlbRequestIO, TlbResp}

class LoadToLsqIO(implicit p: Parameters) extends XSBundle {
  val loadIn = ValidIO(new LsPipelineBundle)
  val ldout = Flipped(DecoupledIO(new ExuOutput))
  val loadDataForwarded = Output(Bool())
  val delayedLoadError = Output(Bool())
  val dcacheRequireReplay = Output(Bool())
  val forward = new PipeLoadForwardQueryIO
  val loadViolationQuery = new LoadViolationQueryIO
  val trigger = Flipped(new LqTriggerIO)
}

class LoadToLoadIO(implicit p: Parameters) extends XSBundle {
  // load-to-load fast path is limited to ld (64 bit) used as vaddr src1 only
  val data = UInt(XLEN.W)
  val valid = Bool()
}

class LoadUnitTriggerIO(implicit p: Parameters) extends XSBundle {
  val tdata2 = Input(UInt(64.W))
  val matchType = Input(UInt(2.W))
  val tEnable = Input(Bool()) // timing is calculated before this
  val addrHit = Output(Bool())
  val lastDataHit = Output(Bool())
}

// Load Pipeline Stage 0
// Generate addr, use addr to query DCache and DTLB
class LoadUnit_S0(implicit p: Parameters) extends XSModule with HasDCacheParameters {
  val io = IO(new Bundle() {
    val in = Flipped(Decoupled(new ExuInput))
    val out = Decoupled(new LsPipelineBundle)
    val dtlbReq = DecoupledIO(new TlbReq)
    val dcacheReq = DecoupledIO(new DCacheWordReq)
    val rsIdx = Input(UInt(log2Up(IssQueSize).W))
    val isFirstIssue = Input(Bool())
    val fastpath = Input(new LoadToLoadIO)
    val s0_kill = Input(Bool())
  })
  require(LoadPipelineWidth == exuParameters.LduCnt)

  val imm12 = io.in.bits.uop.ctrl.imm(11, 0)
  val s0_vaddr = WireInit(io.in.bits.src(0) + SignExt(imm12, VAddrBits))
  val s0_mask = WireInit(genWmask(s0_vaddr, io.in.bits.uop.ctrl.fuOpType(1, 0)))
  val s0_uop = WireInit(io.in.bits.uop)

  if (EnableLoadToLoadForward) {
    // When there's no valid instruction from RS, we try the load-to-load forwarding.
    when (!io.in.valid) {
      s0_vaddr := io.fastpath.data
      // Assume the pointer chasing is always ld.
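      // (note) io.fastpath.data is the previous load's 64-bit result, used
      // directly as the base address; the immediate is checked and folded in
      // at s1 (see cancelPointerChasing in LoadUnit below), so treating the
      // chased access as a plain ld is the working assumption here.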
      s0_uop.ctrl.fuOpType := LSUOpType.ld
      s0_mask := genWmask(0.U, LSUOpType.ld)
    }
  }

  val isSoftPrefetch = LSUOpType.isPrefetch(s0_uop.ctrl.fuOpType)
  val isSoftPrefetchRead = s0_uop.ctrl.fuOpType === LSUOpType.prefetch_r
  val isSoftPrefetchWrite = s0_uop.ctrl.fuOpType === LSUOpType.prefetch_w

  // query DTLB
  io.dtlbReq.valid := io.in.valid || io.fastpath.valid
  io.dtlbReq.bits.vaddr := s0_vaddr
  io.dtlbReq.bits.cmd := TlbCmd.read
  io.dtlbReq.bits.size := LSUOpType.size(s0_uop.ctrl.fuOpType)
  io.dtlbReq.bits.kill := DontCare
  io.dtlbReq.bits.debug.robIdx := s0_uop.robIdx
  io.dtlbReq.bits.debug.pc := s0_uop.cf.pc
  io.dtlbReq.bits.debug.isFirstIssue := io.isFirstIssue

  // query DCache
  io.dcacheReq.valid := io.in.valid || io.fastpath.valid
  when (isSoftPrefetchRead) {
    io.dcacheReq.bits.cmd := MemoryOpConstants.M_PFR
  }.elsewhen (isSoftPrefetchWrite) {
    io.dcacheReq.bits.cmd := MemoryOpConstants.M_PFW
  }.otherwise {
    io.dcacheReq.bits.cmd := MemoryOpConstants.M_XRD
  }
  io.dcacheReq.bits.addr := s0_vaddr
  io.dcacheReq.bits.mask := s0_mask
  io.dcacheReq.bits.data := DontCare
  when (isSoftPrefetch) {
    io.dcacheReq.bits.instrtype := SOFT_PREFETCH.U
  }.otherwise {
    io.dcacheReq.bits.instrtype := LOAD_SOURCE.U
  }

  // TODO: update cache meta
  io.dcacheReq.bits.id := DontCare

  val addrAligned = LookupTree(s0_uop.ctrl.fuOpType(1, 0), List(
    "b00".U -> true.B,                   // b
    "b01".U -> (s0_vaddr(0) === 0.U),    // h
    "b10".U -> (s0_vaddr(1, 0) === 0.U), // w
    "b11".U -> (s0_vaddr(2, 0) === 0.U)  // d
  ))

  io.out.valid := (io.in.valid || io.fastpath.valid) && io.dcacheReq.ready && !io.s0_kill

  io.out.bits := DontCare
  io.out.bits.vaddr := s0_vaddr
  io.out.bits.mask := s0_mask
  io.out.bits.uop := s0_uop
  io.out.bits.uop.cf.exceptionVec(loadAddrMisaligned) := !addrAligned
  io.out.bits.rsIdx := io.rsIdx
  io.out.bits.isFirstIssue := io.isFirstIssue
  io.out.bits.isSoftPrefetch := isSoftPrefetch

  io.in.ready := !io.in.valid || (io.out.ready && io.dcacheReq.ready)

  XSDebug(io.dcacheReq.fire,
    p"[DCACHE LOAD REQ] pc ${Hexadecimal(s0_uop.cf.pc)}, vaddr ${Hexadecimal(s0_vaddr)}\n"
  )
  XSPerfAccumulate("in_valid", io.in.valid)
  XSPerfAccumulate("in_fire", io.in.fire)
  XSPerfAccumulate("in_fire_first_issue", io.in.valid && io.isFirstIssue)
  XSPerfAccumulate("stall_out", io.out.valid && !io.out.ready && io.dcacheReq.ready)
  XSPerfAccumulate("stall_dcache", io.out.valid && io.out.ready && !io.dcacheReq.ready)
  XSPerfAccumulate("addr_spec_success", io.out.fire && s0_vaddr(VAddrBits-1, 12) === io.in.bits.src(0)(VAddrBits-1, 12))
  XSPerfAccumulate("addr_spec_failed", io.out.fire && s0_vaddr(VAddrBits-1, 12) =/= io.in.bits.src(0)(VAddrBits-1, 12))
  XSPerfAccumulate("addr_spec_success_once", io.out.fire && s0_vaddr(VAddrBits-1, 12) === io.in.bits.src(0)(VAddrBits-1, 12) && io.isFirstIssue)
  XSPerfAccumulate("addr_spec_failed_once", io.out.fire && s0_vaddr(VAddrBits-1, 12) =/= io.in.bits.src(0)(VAddrBits-1, 12) && io.isFirstIssue)
}


// Load Pipeline Stage 1
// TLB resp (send paddr to dcache)
class LoadUnit_S1(implicit p: Parameters) extends XSModule {
  val io = IO(new Bundle() {
    val in = Flipped(Decoupled(new LsPipelineBundle))
    val s1_kill = Input(Bool())
    val out = Decoupled(new LsPipelineBundle)
    val dtlbResp = Flipped(DecoupledIO(new TlbResp))
    val dcachePAddr = Output(UInt(PAddrBits.W))
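    // Cancels the dcache access issued in s0 when the load must not proceed
    // (tlb miss / exception / s1_kill); see the io.dcacheKill assignment below.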
    val dcacheKill = Output(Bool())
    val dcacheBankConflict = Input(Bool())
    val fullForwardFast = Output(Bool())
    val sbuffer = new LoadForwardQueryIO
    val lsq = new PipeLoadForwardQueryIO
    val loadViolationQueryReq = Decoupled(new LoadViolationQueryReq)
    val rsFeedback = ValidIO(new RSFeedback)
    val csrCtrl = Flipped(new CustomCSRCtrlIO)
    val needLdVioCheckRedo = Output(Bool())
  })

  val s1_uop = io.in.bits.uop
  val s1_paddr = io.dtlbResp.bits.paddr
  // af & pf exceptions are modified below
  val s1_exception = ExceptionNO.selectByFu(io.out.bits.uop.cf.exceptionVec, lduCfg).asUInt.orR
  val s1_tlb_miss = io.dtlbResp.bits.miss
  val s1_mask = io.in.bits.mask
  val s1_bank_conflict = io.dcacheBankConflict

  io.out.bits := io.in.bits // forwardXX fields will be updated in s1

  io.dtlbResp.ready := true.B

  io.dcachePAddr := s1_paddr
  //io.dcacheKill := s1_tlb_miss || s1_exception || s1_mmio
  io.dcacheKill := s1_tlb_miss || s1_exception || io.s1_kill
  // load forward query datapath
  io.sbuffer.valid := io.in.valid && !(s1_exception || s1_tlb_miss || io.s1_kill)
  io.sbuffer.vaddr := io.in.bits.vaddr
  io.sbuffer.paddr := s1_paddr
  io.sbuffer.uop := s1_uop
  io.sbuffer.sqIdx := s1_uop.sqIdx
  io.sbuffer.mask := s1_mask
  io.sbuffer.pc := s1_uop.cf.pc // FIXME: remove it

  io.lsq.valid := io.in.valid && !(s1_exception || s1_tlb_miss || io.s1_kill)
  io.lsq.vaddr := io.in.bits.vaddr
  io.lsq.paddr := s1_paddr
  io.lsq.uop := s1_uop
  io.lsq.sqIdx := s1_uop.sqIdx
  io.lsq.sqIdxMask := DontCare // will be overwritten by sqIdxMask pre-generated in s0
  io.lsq.mask := s1_mask
  io.lsq.pc := s1_uop.cf.pc // FIXME: remove it

  // ld-ld violation query
  io.loadViolationQueryReq.valid := io.in.valid && !(s1_exception || s1_tlb_miss || io.s1_kill)
  io.loadViolationQueryReq.bits.paddr := s1_paddr
  io.loadViolationQueryReq.bits.uop := s1_uop

  // Generate forwardMaskFast to wake up insts earlier
  val forwardMaskFast = io.lsq.forwardMaskFast.asUInt | io.sbuffer.forwardMaskFast.asUInt
  io.fullForwardFast := ((~forwardMaskFast).asUInt & s1_mask) === 0.U

  // Generate feedback signal caused by:
  // * dcache bank conflict
  // * need to redo ld-ld violation check
  val needLdVioCheckRedo = io.loadViolationQueryReq.valid &&
    !io.loadViolationQueryReq.ready &&
    RegNext(io.csrCtrl.ldld_vio_check_enable)
  io.needLdVioCheckRedo := needLdVioCheckRedo
  io.rsFeedback.valid := io.in.valid && (s1_bank_conflict || needLdVioCheckRedo) && !io.s1_kill
  io.rsFeedback.bits.hit := false.B // we have found s1_bank_conflict / need to redo ld-ld violation check
  io.rsFeedback.bits.rsIdx := io.in.bits.rsIdx
  io.rsFeedback.bits.flushState := io.in.bits.ptwBack
  io.rsFeedback.bits.sourceType := Mux(s1_bank_conflict, RSFeedbackType.bankConflict, RSFeedbackType.ldVioCheckRedo)
  io.rsFeedback.bits.dataInvalidSqIdx := DontCare

  // if replay is detected in load_s1,
  // the load inst will be canceled immediately
  io.out.valid := io.in.valid && !io.rsFeedback.valid && !io.s1_kill
  io.out.bits.paddr := s1_paddr
  io.out.bits.tlbMiss := s1_tlb_miss

  // current ori test will cause the case of ldest == 0; this will be modified in the future
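  // Note (inferred from the wiring): s1_exception above is computed from
  // io.out.bits.uop.cf.exceptionVec, so by last-connect semantics it already
  // sees the pf/af updates below in the same cycle; the assignment order is
  // intentional, not a bug.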
  // af & pf exceptions are modified here
  io.out.bits.uop.cf.exceptionVec(loadPageFault) := io.dtlbResp.bits.excp.pf.ld
  io.out.bits.uop.cf.exceptionVec(loadAccessFault) := io.dtlbResp.bits.excp.af.ld

  io.out.bits.ptwBack := io.dtlbResp.bits.ptwBack
  io.out.bits.rsIdx := io.in.bits.rsIdx

  io.out.bits.isSoftPrefetch := io.in.bits.isSoftPrefetch

  io.in.ready := !io.in.valid || io.out.ready

  XSPerfAccumulate("in_valid", io.in.valid)
  XSPerfAccumulate("in_fire", io.in.fire)
  XSPerfAccumulate("in_fire_first_issue", io.in.fire && io.in.bits.isFirstIssue)
  XSPerfAccumulate("tlb_miss", io.in.fire && s1_tlb_miss)
  XSPerfAccumulate("tlb_miss_first_issue", io.in.fire && s1_tlb_miss && io.in.bits.isFirstIssue)
  XSPerfAccumulate("stall_out", io.out.valid && !io.out.ready)
}

// Load Pipeline Stage 2
// DCache resp
class LoadUnit_S2(implicit p: Parameters) extends XSModule with HasLoadHelper {
  val io = IO(new Bundle() {
    val in = Flipped(Decoupled(new LsPipelineBundle))
    val out = Decoupled(new LsPipelineBundle)
    val rsFeedback = ValidIO(new RSFeedback)
    val dcacheResp = Flipped(DecoupledIO(new DCacheWordResp))
    val pmpResp = Flipped(new PMPRespBundle())
    val lsq = new LoadForwardQueryIO
    val dataInvalidSqIdx = Input(UInt())
    val sbuffer = new LoadForwardQueryIO
    val dataForwarded = Output(Bool())
    val dcacheRequireReplay = Output(Bool())
    val fullForward = Output(Bool())
    val fastpath = Output(new LoadToLoadIO)
    val dcache_kill = Output(Bool())
    val delayedLoadError = Output(Bool())
    val loadViolationQueryResp = Flipped(Valid(new LoadViolationQueryResp))
    val csrCtrl = Flipped(new CustomCSRCtrlIO)
    val sentFastUop = Input(Bool())
    val static_pm = Input(Valid(Bool())) // valid for static, bits for mmio
  })

  val pmp = WireInit(io.pmpResp)
  when (io.static_pm.valid) {
    pmp.ld := false.B
    pmp.st := false.B
    pmp.instr := false.B
    pmp.mmio := io.static_pm.bits
  }

  val s2_is_prefetch = io.in.bits.isSoftPrefetch

  // exceptions that may cause the load addr to be invalid / illegal
  //
  // if such an exception happens, the inst and its exception info
  // will be force-written back to the rob
  val s2_exception_vec = WireInit(io.in.bits.uop.cf.exceptionVec)
  s2_exception_vec(loadAccessFault) := io.in.bits.uop.cf.exceptionVec(loadAccessFault) || pmp.ld
  // soft prefetch will not trigger any exception (but an ecc error interrupt may be triggered)
  when (s2_is_prefetch) {
    s2_exception_vec := 0.U.asTypeOf(s2_exception_vec.cloneType)
  }
  val s2_exception = ExceptionNO.selectByFu(s2_exception_vec, lduCfg).asUInt.orR

  // writeback access fault caused by ecc error / bus error
  //
  // * ecc data error is slow to generate, so we will not use it until load stage 3
  // * in load stage 3, an extra signal io.load_error will be used to report it

  // now a cache ecc error will raise an access fault
  // at the same time, error info (including the error paddr) will be written to
  // a customized CSR "CACHE_ERROR"
  if (EnableAccurateLoadError) {
    io.delayedLoadError := io.dcacheResp.bits.error_delayed &&
      io.csrCtrl.cache_error_enable &&
      RegNext(io.out.valid)
  } else {
    io.delayedLoadError := false.B
  }

  val actually_mmio = pmp.mmio
  val s2_uop = io.in.bits.uop
  val s2_mask = io.in.bits.mask
  val s2_paddr = io.in.bits.paddr
  val s2_tlb_miss = io.in.bits.tlbMiss
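  // Whether the access is MMIO is decided by the pmp/pma response here in s2
  // (or by static_pm from the tlb, one cycle earlier); as the expression below
  // shows, prefetches and faulting loads are never treated as MMIO.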
  val s2_mmio = !s2_is_prefetch && actually_mmio && !s2_exception
  val s2_cache_miss = io.dcacheResp.bits.miss
  val s2_cache_replay = io.dcacheResp.bits.replay
  val s2_cache_tag_error = io.dcacheResp.bits.tag_error
  val s2_forward_fail = io.lsq.matchInvalid || io.sbuffer.matchInvalid
  val s2_ldld_violation = io.loadViolationQueryResp.valid &&
    io.loadViolationQueryResp.bits.have_violation &&
    RegNext(io.csrCtrl.ldld_vio_check_enable)
  val s2_data_invalid = io.lsq.dataInvalid && !s2_forward_fail && !s2_ldld_violation && !s2_exception

  io.dcache_kill := pmp.ld || pmp.mmio // move pmp resp kill to outside
  io.dcacheResp.ready := true.B
  val dcacheShouldResp = !(s2_tlb_miss || s2_exception || s2_mmio || s2_is_prefetch)
  assert(!(io.in.valid && (dcacheShouldResp && !io.dcacheResp.valid)), "DCache response got lost")

  // merge forward results
  // lsq has higher priority than sbuffer
  val forwardMask = Wire(Vec(8, Bool()))
  val forwardData = Wire(Vec(8, UInt(8.W)))

  val fullForward = ((~forwardMask.asUInt).asUInt & s2_mask) === 0.U && !io.lsq.dataInvalid
  io.lsq := DontCare
  io.sbuffer := DontCare
  io.fullForward := fullForward

  // generate XLEN/8 Muxes
  for (i <- 0 until XLEN / 8) {
    forwardMask(i) := io.lsq.forwardMask(i) || io.sbuffer.forwardMask(i)
    forwardData(i) := Mux(io.lsq.forwardMask(i), io.lsq.forwardData(i), io.sbuffer.forwardData(i))
  }

  XSDebug(io.out.fire, "[FWD LOAD RESP] pc %x fwd %x(%b) + %x(%b)\n",
    s2_uop.cf.pc,
    io.lsq.forwardData.asUInt, io.lsq.forwardMask.asUInt,
    io.in.bits.forwardData.asUInt, io.in.bits.forwardMask.asUInt
  )

  // data merge
  val rdataVec = VecInit((0 until XLEN / 8).map(j =>
    Mux(forwardMask(j), forwardData(j), io.dcacheResp.bits.data(8*(j+1)-1, 8*j))))
  val rdata = rdataVec.asUInt
  val rdataSel = LookupTree(s2_paddr(2, 0), List(
    "b000".U -> rdata(63, 0),
    "b001".U -> rdata(63, 8),
    "b010".U -> rdata(63, 16),
    "b011".U -> rdata(63, 24),
    "b100".U -> rdata(63, 32),
    "b101".U -> rdata(63, 40),
    "b110".U -> rdata(63, 48),
    "b111".U -> rdata(63, 56)
  ))
  val rdataPartialLoad = rdataHelper(s2_uop, rdataSel)

  io.out.valid := io.in.valid && !s2_tlb_miss && !s2_data_invalid
  // Inst will be canceled in store queue / lsq,
  // so we do not need to care about flush in load / store unit's out.valid
  io.out.bits := io.in.bits
  io.out.bits.data := rdataPartialLoad
  // when an exception occurs, mark it as not-miss and let it write back to rob (via int port)
  if (EnableFastForward) {
    io.out.bits.miss := s2_cache_miss &&
      !s2_exception &&
      !s2_forward_fail &&
      !s2_ldld_violation &&
      !fullForward &&
      !s2_is_prefetch
  } else {
    io.out.bits.miss := s2_cache_miss &&
      !s2_exception &&
      !s2_forward_fail &&
      !s2_ldld_violation &&
      !s2_is_prefetch
  }
  io.out.bits.uop.ctrl.fpWen := io.in.bits.uop.ctrl.fpWen && !s2_exception
  // if forward fails, replay this inst from fetch
  val forwardFailReplay = s2_forward_fail && !s2_mmio && !s2_is_prefetch && !s2_tlb_miss
  // if a ld-ld violation is detected, replay this inst from fetch
  val ldldVioReplay = s2_ldld_violation && !s2_mmio && !s2_is_prefetch && !s2_tlb_miss
  val s2_need_replay_from_fetch = (s2_forward_fail || s2_ldld_violation) && !s2_mmio && !s2_is_prefetch && !s2_tlb_miss
  io.out.bits.uop.ctrl.replayInst := s2_need_replay_from_fetch
  io.out.bits.mmio := s2_mmio
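  // (rationale, as the wiring suggests) If the fast wakeup uop was already
  // sent in s1 but the load turns out to be MMIO, its consumers were woken
  // speculatively for data that will only arrive much later via the load
  // queue, so the pipeline behind this uop has to be flushed.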
  io.out.bits.uop.ctrl.flushPipe := s2_mmio && io.sentFastUop
  io.out.bits.uop.cf.exceptionVec := s2_exception_vec // cache error not included

  // For timing reasons, sometimes we can not let
  //   io.out.bits.miss := s2_cache_miss && !s2_exception && !fullForward
  // We use io.dataForwarded instead. It means:
  // 1. Forward logic has prepared all data needed,
  //    and the dcache query is no longer needed.
  // 2. ... or a dcache tag error is detected. Such an inst will not update
  //    the miss queue; that is to say, if it misses, it may not be refilled.
  // Such an inst will be written back from the load queue.
  io.dataForwarded := s2_cache_miss && !s2_exception && !s2_forward_fail &&
    (fullForward || io.csrCtrl.cache_error_enable && s2_cache_tag_error)
  // io.out.bits.forwardX will be sent to lq
  io.out.bits.forwardMask := forwardMask
  // data retrieved from dcache is also included in io.out.bits.forwardData
  io.out.bits.forwardData := rdataVec

  io.in.ready := io.out.ready || !io.in.valid

  // feedback tlb result to RS
  io.rsFeedback.valid := io.in.valid
  val s2_need_replay_from_rs = Wire(Bool())
  if (EnableFastForward) {
    s2_need_replay_from_rs :=
      s2_tlb_miss || // replay if dtlb miss
      s2_cache_replay && !s2_is_prefetch && !s2_forward_fail && !s2_ldld_violation && !s2_mmio && !s2_exception && !fullForward || // replay if dcache miss queue full / busy
      s2_data_invalid && !s2_is_prefetch && !s2_forward_fail && !s2_ldld_violation // replay if store-to-load forward data is not ready
  } else {
    // Note that if all parts of the data are available in sq / sbuffer, a replay required by dcache will not be scheduled
    s2_need_replay_from_rs :=
      s2_tlb_miss || // replay if dtlb miss
      s2_cache_replay && !s2_is_prefetch && !s2_forward_fail && !s2_ldld_violation && !s2_mmio && !s2_exception && !io.dataForwarded || // replay if dcache miss queue full / busy
      s2_data_invalid && !s2_is_prefetch && !s2_forward_fail && !s2_ldld_violation // replay if store-to-load forward data is not ready
  }
  assert(!RegNext(io.in.valid && s2_need_replay_from_rs && s2_need_replay_from_fetch))
  io.rsFeedback.bits.hit := !s2_need_replay_from_rs
  io.rsFeedback.bits.rsIdx := io.in.bits.rsIdx
  io.rsFeedback.bits.flushState := io.in.bits.ptwBack
  // feedback source priority: tlbMiss > dataInvalid > mshrFull
  // general case priority: tlbMiss > exception (including forward_fail / ldld_violation) > mmio > dataInvalid > mshrFull > normal miss / hit
  io.rsFeedback.bits.sourceType := Mux(s2_tlb_miss, RSFeedbackType.tlbMiss,
    Mux(s2_data_invalid,
      RSFeedbackType.dataInvalid,
      RSFeedbackType.mshrFull
    )
  )
  io.rsFeedback.bits.dataInvalidSqIdx.value := io.dataInvalidSqIdx
  io.rsFeedback.bits.dataInvalidSqIdx.flag := DontCare

  // s2_cache_replay is quite slow to generate, send it separately to LQ
  if (EnableFastForward) {
    io.dcacheRequireReplay := s2_cache_replay && !fullForward
  } else {
    io.dcacheRequireReplay := s2_cache_replay &&
      !io.rsFeedback.bits.hit &&
      !io.dataForwarded &&
      !s2_is_prefetch &&
      io.out.bits.miss
  }

  // fast load-to-load forwarding
  io.fastpath.valid := RegNext(io.out.valid) // for debug only
  io.fastpath.data := RegNext(io.out.bits.data)
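  // Note that the fastpath result is registered, so the chasing load that
  // consumes io.fastpath in its own s0 sees this load's data one cycle
  // after s2.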

  XSDebug(io.out.fire, "[DCACHE LOAD RESP] pc %x rdata %x <- D$ %x + fwd %x(%b)\n",
    s2_uop.cf.pc, rdataPartialLoad, io.dcacheResp.bits.data,
    forwardData.asUInt, forwardMask.asUInt
  )

  XSPerfAccumulate("in_valid", io.in.valid)
  XSPerfAccumulate("in_fire", io.in.fire)
  XSPerfAccumulate("in_fire_first_issue", io.in.fire && io.in.bits.isFirstIssue)
  XSPerfAccumulate("dcache_miss", io.in.fire && s2_cache_miss)
  XSPerfAccumulate("dcache_miss_first_issue", io.in.fire && s2_cache_miss && io.in.bits.isFirstIssue)
  XSPerfAccumulate("full_forward", io.in.valid && fullForward)
  XSPerfAccumulate("dcache_miss_full_forward", io.in.valid && s2_cache_miss && fullForward)
  XSPerfAccumulate("replay", io.rsFeedback.valid && !io.rsFeedback.bits.hit)
  XSPerfAccumulate("replay_tlb_miss", io.rsFeedback.valid && !io.rsFeedback.bits.hit && s2_tlb_miss)
  XSPerfAccumulate("replay_cache", io.rsFeedback.valid && !io.rsFeedback.bits.hit && !s2_tlb_miss && s2_cache_replay)
  XSPerfAccumulate("stall_out", io.out.valid && !io.out.ready)
  XSPerfAccumulate("replay_from_fetch_forward", io.out.valid && forwardFailReplay)
  XSPerfAccumulate("replay_from_fetch_load_vio", io.out.valid && ldldVioReplay)
}

class LoadUnit(implicit p: Parameters) extends XSModule
  with HasLoadHelper
  with HasPerfEvents
  with HasDCacheParameters
{
  val io = IO(new Bundle() {
    val ldin = Flipped(Decoupled(new ExuInput))
    val ldout = Decoupled(new ExuOutput)
    val redirect = Flipped(ValidIO(new Redirect))
    val feedbackSlow = ValidIO(new RSFeedback)
    val feedbackFast = ValidIO(new RSFeedback)
    val rsIdx = Input(UInt(log2Up(IssQueSize).W))
    val isFirstIssue = Input(Bool())
    val dcache = new DCacheLoadIO
    val sbuffer = new LoadForwardQueryIO
    val lsq = new LoadToLsqIO
    val refill = Flipped(ValidIO(new Refill))
    val fastUop = ValidIO(new MicroOp) // early wakeup signal generated in load_s1, sent to RS in load_s2
    val trigger = Vec(3, new LoadUnitTriggerIO)

    val tlb = new TlbRequestIO
    val pmp = Flipped(new PMPRespBundle()) // arrives at the same cycle as the tlb resp now

    val fastpathOut = Output(new LoadToLoadIO)
    val fastpathIn = Input(new LoadToLoadIO)
    val loadFastMatch = Input(Bool())

    val delayedLoadError = Output(Bool()) // load ecc error
    // Note that io.delayedLoadError and io.lsq.delayedLoadError are different

    val csrCtrl = Flipped(new CustomCSRCtrlIO)
  })

  val load_s0 = Module(new LoadUnit_S0)
  val load_s1 = Module(new LoadUnit_S1)
  val load_s2 = Module(new LoadUnit_S2)

  load_s0.io.in <> io.ldin
  load_s0.io.dtlbReq <> io.tlb.req
  load_s0.io.dcacheReq <> io.dcache.req
  load_s0.io.rsIdx := io.rsIdx
  load_s0.io.isFirstIssue := io.isFirstIssue
  load_s0.io.fastpath := io.fastpathIn
  load_s0.io.s0_kill := false.B
  val s0_tryPointerChasing = !io.ldin.valid && io.fastpathIn.valid

  PipelineConnect(load_s0.io.out, load_s1.io.in, true.B,
    load_s0.io.out.bits.uop.robIdx.needFlush(io.redirect) && !s0_tryPointerChasing)

  load_s1.io.s1_kill := RegEnable(load_s0.io.s0_kill, false.B, load_s0.io.in.valid || io.fastpathIn.valid)
  io.tlb.req_kill := load_s1.io.s1_kill
  load_s1.io.dtlbResp <> io.tlb.resp
  io.dcache.s1_paddr <> load_s1.io.dcachePAddr
  io.dcache.s1_kill := load_s1.io.dcacheKill
  load_s1.io.sbuffer <> io.sbuffer
  load_s1.io.lsq <> io.lsq.forward
  load_s1.io.loadViolationQueryReq <> io.lsq.loadViolationQuery.req
  load_s1.io.dcacheBankConflict <> io.dcache.s1_bank_conflict
  load_s1.io.csrCtrl <> io.csrCtrl

  val s1_tryPointerChasing = RegNext(s0_tryPointerChasing && load_s0.io.in.ready && load_s0.io.dcacheReq.ready, false.B)
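  // A worked example of the set-mismatch check below, assuming 64-byte cache
  // lines (set index starting at bit 6): with base(5, 0) = 0b111000 and
  // imm(5, 0) = 0b001000, the 7-bit sum realPointerAddress is 0b1000000, so
  // bit 6 is set, CACHE_SET(base + imm) != CACHE_SET(base), and the
  // speculative dcache access made with base alone must be cancelled.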
  val cancelPointerChasing = WireInit(false.B)
  if (EnableLoadToLoadForward) {
    // Sometimes, we need to cancel the load-load forwarding.
    // These checks can be put at S0 if timing is bad at S1.
    // Case 0: CACHE_SET(base + offset) != CACHE_SET(base) (the lowest 6-bit addition overflows)
    val speculativeAddress = RegEnable(load_s0.io.fastpath.data(5, 0), s0_tryPointerChasing)
    val realPointerAddress = Cat(speculativeAddress(5, 3), 0.U(3.W)) +& io.ldin.bits.uop.ctrl.imm(5, 0)
    val addressMisMatch = realPointerAddress(6) || io.ldin.bits.uop.ctrl.imm(11, 6).orR
    // Case 1: the address is not 64-bit aligned or the fuOpType is not LD
    val addressNotAligned = speculativeAddress(2, 0).orR
    val fuOpTypeIsNotLd = io.ldin.bits.uop.ctrl.fuOpType =/= LSUOpType.ld
    // Case 2: this is not a valid load-load pair
    val notFastMatch = RegEnable(!io.loadFastMatch, s0_tryPointerChasing)
    // Case 3: this load-load uop is cancelled
    val isCancelled = !io.ldin.valid
    when (s1_tryPointerChasing) {
      cancelPointerChasing := addressMisMatch || addressNotAligned || fuOpTypeIsNotLd || notFastMatch || isCancelled
      load_s1.io.in.bits.uop := io.ldin.bits.uop
      val spec_vaddr = load_s1.io.in.bits.vaddr
      val vaddr = Cat(spec_vaddr(VAddrBits - 1, 6), realPointerAddress(5, 3), spec_vaddr(2, 0))
      io.sbuffer.vaddr := vaddr
      io.lsq.forward.vaddr := vaddr
      load_s1.io.in.bits.rsIdx := io.rsIdx
      load_s1.io.in.bits.isFirstIssue := io.isFirstIssue
      // We need to replace vaddr(5, 3).
      val spec_paddr = io.tlb.resp.bits.paddr
      load_s1.io.dtlbResp.bits.paddr := Cat(spec_paddr(PAddrBits - 1, 6), realPointerAddress(5, 3), spec_paddr(2, 0))
    }
    when (cancelPointerChasing) {
      load_s1.io.s1_kill := true.B
    }.otherwise {
      load_s0.io.s0_kill := s1_tryPointerChasing
      when (s1_tryPointerChasing) {
        io.ldin.ready := true.B
      }
    }

    XSPerfAccumulate("load_to_load_forward", s1_tryPointerChasing && !cancelPointerChasing)
    XSPerfAccumulate("load_to_load_forward_try", s1_tryPointerChasing)
    XSPerfAccumulate("load_to_load_forward_fail", cancelPointerChasing)
    XSPerfAccumulate("load_to_load_forward_fail_cancelled", cancelPointerChasing && isCancelled)
    XSPerfAccumulate("load_to_load_forward_fail_wakeup_mismatch", cancelPointerChasing && !isCancelled && notFastMatch)
    XSPerfAccumulate("load_to_load_forward_fail_op_not_ld",
      cancelPointerChasing && !isCancelled && !notFastMatch && fuOpTypeIsNotLd)
    XSPerfAccumulate("load_to_load_forward_fail_addr_align",
      cancelPointerChasing && !isCancelled && !notFastMatch && !fuOpTypeIsNotLd && addressNotAligned)
    XSPerfAccumulate("load_to_load_forward_fail_set_mismatch",
      cancelPointerChasing && !isCancelled && !notFastMatch && !fuOpTypeIsNotLd && !addressNotAligned && addressMisMatch)
  }
  PipelineConnect(load_s1.io.out, load_s2.io.in, true.B,
    load_s1.io.out.bits.uop.robIdx.needFlush(io.redirect) || cancelPointerChasing)

  io.dcache.s2_kill := load_s2.io.dcache_kill // to kill mmio resps which are redirected
  load_s2.io.dcacheResp <> io.dcache.resp
  load_s2.io.pmpResp <> io.pmp
  load_s2.io.static_pm := RegNext(io.tlb.resp.bits.static_pm)
  load_s2.io.lsq.forwardData <> io.lsq.forward.forwardData
  load_s2.io.lsq.forwardMask <> io.lsq.forward.forwardMask
  load_s2.io.lsq.forwardMaskFast <> io.lsq.forward.forwardMaskFast // should not be used in load_s2
  load_s2.io.lsq.dataInvalid <> io.lsq.forward.dataInvalid
  load_s2.io.lsq.matchInvalid <> io.lsq.forward.matchInvalid
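  // The sbuffer forward port below mirrors the lsq one above; both queries
  // are issued in load_s1 and the results are merged in load_s2, with lsq
  // data taking priority over sbuffer data.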
  load_s2.io.sbuffer.forwardData <> io.sbuffer.forwardData
  load_s2.io.sbuffer.forwardMask <> io.sbuffer.forwardMask
  load_s2.io.sbuffer.forwardMaskFast <> io.sbuffer.forwardMaskFast // should not be used in load_s2
  load_s2.io.sbuffer.dataInvalid <> io.sbuffer.dataInvalid // always false
  load_s2.io.sbuffer.matchInvalid <> io.sbuffer.matchInvalid
  load_s2.io.dataForwarded <> io.lsq.loadDataForwarded
  load_s2.io.fastpath <> io.fastpathOut
  load_s2.io.dataInvalidSqIdx := io.lsq.forward.dataInvalidSqIdx // provide dataInvalidSqIdx to make wakeup faster
  load_s2.io.loadViolationQueryResp <> io.lsq.loadViolationQuery.resp
  load_s2.io.csrCtrl <> io.csrCtrl
  load_s2.io.sentFastUop := io.fastUop.valid

  // actually load s3
  io.lsq.dcacheRequireReplay := load_s2.io.dcacheRequireReplay
  io.lsq.delayedLoadError := load_s2.io.delayedLoadError

  // feedback tlb miss / dcache miss queue full
  io.feedbackSlow.valid := RegNext(load_s2.io.rsFeedback.valid && !load_s2.io.out.bits.uop.robIdx.needFlush(io.redirect))
  io.feedbackSlow.bits := RegNext(load_s2.io.rsFeedback.bits)
  val s3_replay_for_mshrfull = RegNext(!load_s2.io.rsFeedback.bits.hit && load_s2.io.rsFeedback.bits.sourceType === RSFeedbackType.mshrFull)
  val s3_refill_hit_load_paddr = refill_addr_hit(RegNext(load_s2.io.out.bits.paddr), io.refill.bits.addr)
  // update replay request
  io.feedbackSlow.bits.hit := RegNext(load_s2.io.rsFeedback.bits).hit ||
    s3_refill_hit_load_paddr && s3_replay_for_mshrfull

  // feedback bank conflict / ld-vio check struct hazard to rs
  io.feedbackFast.bits := RegNext(load_s1.io.rsFeedback.bits)
  io.feedbackFast.valid := RegNext(load_s1.io.rsFeedback.valid && !load_s1.io.out.bits.uop.robIdx.needFlush(io.redirect))
  // If replay is reported at load_s1, the inst will be canceled (it will not enter load_s2),
  // in that case:
  // * the replay should not be reported twice
  assert(!(RegNext(io.feedbackFast.valid) && io.feedbackSlow.valid))
  // * io.fastUop.valid should not be reported
  assert(!RegNext(io.feedbackFast.valid && io.fastUop.valid))

  // pre-calculate the sqIdx mask in s0, then send it to lsq in s1 for forwarding
  val sqIdxMaskReg = RegNext(UIntToMask(load_s0.io.in.bits.uop.sqIdx.value, StoreQueueSize))
  // to enable load-load, sqIdxMask must be calculated based on ldin.uop
  // If the timing here is not OK, load-load forwarding has to be disabled.
  // Or should we calculate sqIdxMask at the RS?
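  // For reference, the mask semantics assumed here: UIntToMask(3.U, 8)
  // yields 0b00000111, i.e. the bits below the index are set; the lsq
  // combines this with the sqIdx flag to select the stores older than this
  // load, the only ones it may need to forward from.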
  io.lsq.forward.sqIdxMask := sqIdxMaskReg
  if (EnableLoadToLoadForward) {
    when (s1_tryPointerChasing) {
      io.lsq.forward.sqIdxMask := UIntToMask(io.ldin.bits.uop.sqIdx.value, StoreQueueSize)
    }
  }

  // // use s2_hit_way to select data received in s1
  // load_s2.io.dcacheResp.bits.data := Mux1H(RegNext(io.dcache.s1_hit_way), RegNext(io.dcache.s1_data))
  // assert(load_s2.io.dcacheResp.bits.data === io.dcache.resp.bits.data)

  // now io.fastUop.valid is sent to RS in load_s2
  io.fastUop.valid := RegNext(
    io.dcache.s1_hit_way.orR && // dcache hit
    !io.dcache.s1_disable_fast_wakeup && // load fast wakeup should be disabled when dcache data read is not ready
    load_s1.io.in.valid && // valid load request
    !load_s1.io.s1_kill && // killed by load-load forwarding
    !load_s1.io.dtlbResp.bits.fast_miss && // not mmio or tlb miss, pf / af not included here
    !io.lsq.forward.dataInvalidFast && // forward failed
    !load_s1.io.needLdVioCheckRedo // load-load violation check: load paddr cam struct hazard
  ) && !RegNext(load_s1.io.out.bits.uop.robIdx.needFlush(io.redirect))
  io.fastUop.bits := RegNext(load_s1.io.out.bits.uop)

  XSDebug(load_s0.io.out.valid,
    p"S0: pc ${Hexadecimal(load_s0.io.out.bits.uop.cf.pc)}, lId ${Hexadecimal(load_s0.io.out.bits.uop.lqIdx.asUInt)}, " +
    p"vaddr ${Hexadecimal(load_s0.io.out.bits.vaddr)}, mask ${Hexadecimal(load_s0.io.out.bits.mask)}\n")
  XSDebug(load_s1.io.out.valid,
    p"S1: pc ${Hexadecimal(load_s1.io.out.bits.uop.cf.pc)}, lId ${Hexadecimal(load_s1.io.out.bits.uop.lqIdx.asUInt)}, tlb_miss ${io.tlb.resp.bits.miss}, " +
    p"paddr ${Hexadecimal(load_s1.io.out.bits.paddr)}, mmio ${load_s1.io.out.bits.mmio}\n")

  // writeback to LSQ
  // The current dcache uses MSHRs
  // The load queue will be updated at s2 for both hit/miss int/fp loads
  io.lsq.loadIn.valid := load_s2.io.out.valid
  io.lsq.loadIn.bits := load_s2.io.out.bits

  // write to rob and writeback bus
  val s2_wb_valid = load_s2.io.out.valid && !load_s2.io.out.bits.miss && !load_s2.io.out.bits.mmio

  // Int load, if hit, will be written back at s2
  val hitLoadOut = Wire(Valid(new ExuOutput))
  hitLoadOut.valid := s2_wb_valid
  hitLoadOut.bits.uop := load_s2.io.out.bits.uop
  hitLoadOut.bits.data := load_s2.io.out.bits.data
  hitLoadOut.bits.redirectValid := false.B
  hitLoadOut.bits.redirect := DontCare
  hitLoadOut.bits.debug.isMMIO := load_s2.io.out.bits.mmio
  hitLoadOut.bits.debug.isPerfCnt := false.B
  hitLoadOut.bits.debug.paddr := load_s2.io.out.bits.paddr
  hitLoadOut.bits.debug.vaddr := load_s2.io.out.bits.vaddr
  hitLoadOut.bits.fflags := DontCare

  load_s2.io.out.ready := true.B

  val load_wb_reg = RegNext(Mux(hitLoadOut.valid, hitLoadOut.bits, io.lsq.ldout.bits))
  io.ldout.bits := load_wb_reg
  io.ldout.valid := RegNext(hitLoadOut.valid) && !RegNext(load_s2.io.out.bits.uop.robIdx.needFlush(io.redirect)) ||
    RegNext(io.lsq.ldout.valid) && !RegNext(io.lsq.ldout.bits.uop.robIdx.needFlush(io.redirect)) && !RegNext(hitLoadOut.valid)

  // io.ldout.bits.uop.cf.exceptionVec(loadAccessFault) := load_wb_reg.uop.cf.exceptionVec(loadAccessFault) ||
  //   hitLoadOut.valid && load_s2.io.delayedLoadError

  // io.delayedLoadError := false.B

  io.delayedLoadError := hitLoadOut.valid && load_s2.io.delayedLoadError

  io.lsq.ldout.ready := !hitLoadOut.valid
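  // Writeback arbitration, as wired above: a hitting load writes back
  // straight from s2 and always wins the port; loads completing from the
  // load queue (miss / mmio) only get io.ldout when no s2 hit is writing
  // back in the same cycle (io.lsq.ldout.ready := !hitLoadOut.valid).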

  when (io.feedbackSlow.valid && !io.feedbackSlow.bits.hit) {
    // when a replay from rs is needed, the inst should not be written back to the rob
    assert(RegNext(!hitLoadOut.valid))
    // when a replay from rs is needed, either
    // * the inst should not be written back to the lq, or
    // * the lq state will be updated in load_s3 (next cycle)
    assert(RegNext(!io.lsq.loadIn.valid) || RegNext(load_s2.io.dcacheRequireReplay))
  }

  val lastValidData = RegEnable(io.ldout.bits.data, io.ldout.fire)
  val hitLoadAddrTriggerHitVec = Wire(Vec(3, Bool()))
  val lqLoadAddrTriggerHitVec = io.lsq.trigger.lqLoadAddrTriggerHitVec
  (0 until 3).map{i => {
    val tdata2 = io.trigger(i).tdata2
    val matchType = io.trigger(i).matchType
    val tEnable = io.trigger(i).tEnable

    hitLoadAddrTriggerHitVec(i) := TriggerCmp(load_s2.io.out.bits.vaddr, tdata2, matchType, tEnable)
    io.trigger(i).addrHit := Mux(hitLoadOut.valid, hitLoadAddrTriggerHitVec(i), lqLoadAddrTriggerHitVec(i))
    io.trigger(i).lastDataHit := TriggerCmp(lastValidData, tdata2, matchType, tEnable)
  }}
  io.lsq.trigger.hitLoadAddrTriggerHitVec := hitLoadAddrTriggerHitVec

  val perfEvents = Seq(
    ("load_s0_in_fire         ", load_s0.io.in.fire),
    ("load_to_load_forward    ", load_s1.io.out.valid && s1_tryPointerChasing && !cancelPointerChasing),
    ("stall_dcache            ", load_s0.io.out.valid && load_s0.io.out.ready && !load_s0.io.dcacheReq.ready),
    ("load_s1_in_fire         ", load_s1.io.in.fire),
    ("load_s1_tlb_miss        ", load_s1.io.in.fire && load_s1.io.dtlbResp.bits.miss),
    ("load_s2_in_fire         ", load_s2.io.in.fire),
    ("load_s2_dcache_miss     ", load_s2.io.in.fire && load_s2.io.dcacheResp.bits.miss),
    ("load_s2_replay          ", load_s2.io.rsFeedback.valid && !load_s2.io.rsFeedback.bits.hit),
    ("load_s2_replay_tlb_miss ", load_s2.io.rsFeedback.valid && !load_s2.io.rsFeedback.bits.hit && load_s2.io.in.bits.tlbMiss),
    ("load_s2_replay_cache    ", load_s2.io.rsFeedback.valid && !load_s2.io.rsFeedback.bits.hit && !load_s2.io.in.bits.tlbMiss && load_s2.io.dcacheResp.bits.miss),
  )
  generatePerfEvent()

  when (io.ldout.fire) {
    XSDebug("ldout %x\n", io.ldout.bits.uop.cf.pc)
  }
}