/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import xiangshan.ExceptionNO._
import xiangshan._
import xiangshan.backend.fu.PMPRespBundle
import xiangshan.cache._
import xiangshan.cache.mmu.{TlbCmd, TlbReq, TlbRequestIO, TlbResp}

class LoadToLsqFastIO(implicit p: Parameters) extends XSBundle {
  val valid = Output(Bool())
  val ld_ld_check_ok = Output(Bool())
  val st_ld_check_ok = Output(Bool())
  val cache_bank_no_conflict = Output(Bool())
  val ld_idx = Output(UInt(log2Ceil(LoadQueueSize).W))
}

class LoadToLsqSlowIO(implicit p: Parameters) extends XSBundle {
  val valid = Output(Bool())
  val tlb_hited = Output(Bool())
  val st_ld_check_ok = Output(Bool())
  val cache_no_replay = Output(Bool())
  val forward_data_valid = Output(Bool())
  val ld_idx = Output(UInt(log2Ceil(LoadQueueSize).W))
  val data_invalid_sq_idx = Output(UInt(log2Ceil(StoreQueueSize).W))
}
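// A minimal sketch (an assumption about the LSQ consumer, not code from this file) of how
// the per-check flags in LoadToLsqFastIO above could be collapsed on the receiving side:
// a fast-path replay is needed iff the query is valid and any of the three checks failed.
//
//   val fastNeedReplay = fast.valid &&
//     !(fast.ld_ld_check_ok && fast.st_ld_check_ok && fast.cache_bank_no_conflict)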
class LoadToLsqIO(implicit p: Parameters) extends XSBundle {
  val loadIn = ValidIO(new LqWriteBundle)
  val loadPaddrIn = ValidIO(new LqPaddrWriteBundle)
  val loadVaddrIn = ValidIO(new LqVaddrWriteBundle)
  val ldout = Flipped(DecoupledIO(new ExuOutput))
  val ldRawData = Input(new LoadDataFromLQBundle)
  val s2_load_data_forwarded = Output(Bool())
  val s3_delayed_load_error = Output(Bool())
  val s2_dcache_require_replay = Output(Bool())
  val s3_replay_from_fetch = Output(Bool()) // update uop.ctrl.replayInst in load queue in s3
  val forward = new PipeLoadForwardQueryIO
  val loadViolationQuery = new LoadViolationQueryIO
  val trigger = Flipped(new LqTriggerIO)

  // for load replay
  val replayFast = new LoadToLsqFastIO
  val replaySlow = new LoadToLsqSlowIO
}

class LoadToLoadIO(implicit p: Parameters) extends XSBundle {
  // load to load fast path is limited to ld (64 bit) used as vaddr src1 only
  val data = UInt(XLEN.W)
  val valid = Bool()
}

class LoadUnitTriggerIO(implicit p: Parameters) extends XSBundle {
  val tdata2 = Input(UInt(64.W))
  val matchType = Input(UInt(2.W))
  val tEnable = Input(Bool()) // timing is calculated before this
  val addrHit = Output(Bool())
  val lastDataHit = Output(Bool())
}

// Load Pipeline Stage 0
// Generate addr, use addr to query DCache and DTLB
class LoadUnit_S0(implicit p: Parameters) extends XSModule with HasDCacheParameters {
  val io = IO(new Bundle() {
    val in = Flipped(Decoupled(new ExuInput))
    val out = Decoupled(new LsPipelineBundle)
    val dtlbReq = DecoupledIO(new TlbReq)
    val dcacheReq = DecoupledIO(new DCacheWordReq)
    val rsIdx = Input(UInt(log2Up(IssQueSize).W))
    val isFirstIssue = Input(Bool())
    val fastpath = Input(new LoadToLoadIO)
    val s0_kill = Input(Bool())
    // wire from lq to load pipeline
    val lsqOut = Flipped(Decoupled(new LsPipelineBundle))

    val s0_sqIdx = Output(new SqPtr)
  })
  require(LoadPipelineWidth == exuParameters.LduCnt)

  // there are three sources of the load pipeline's input
  // * 1. load issued by RS (io.in)
  // * 2. load replayed by LSQ (io.lsqOut)
  // * 3. load trying pointer chasing when there is no issued or replayed load (io.fastpath)

  // the priority is
  // 1 > 2 > 3
  // now in S0, choose a load according to the priority

  val s0_vaddr = Wire(UInt(VAddrBits.W))
  val s0_mask = Wire(UInt(8.W))
  val s0_uop = Wire(new MicroOp)
  val s0_isFirstIssue = Wire(Bool())
  val s0_rsIdx = Wire(UInt(log2Up(IssQueSize).W))
  val s0_sqIdx = Wire(new SqPtr)

  io.s0_sqIdx := s0_sqIdx

  val tryFastpath = WireInit(false.B)

  val s0_valid = Wire(Bool())

  s0_valid := io.in.valid || io.lsqOut.valid || tryFastpath

  // assign default value
  s0_uop := DontCare

  when(io.in.valid) {
    val imm12 = io.in.bits.uop.ctrl.imm(11, 0)
    s0_vaddr := io.in.bits.src(0) + SignExt(imm12, VAddrBits)
    s0_mask := genWmask(s0_vaddr, io.in.bits.uop.ctrl.fuOpType(1,0))
    s0_uop := io.in.bits.uop
    s0_isFirstIssue := io.isFirstIssue
    s0_rsIdx := io.rsIdx
    s0_sqIdx := io.in.bits.uop.sqIdx
  }.elsewhen(io.lsqOut.valid) {
    s0_vaddr := io.lsqOut.bits.vaddr
    s0_mask := io.lsqOut.bits.mask
    s0_uop := io.lsqOut.bits.uop
    s0_isFirstIssue := io.lsqOut.bits.isFirstIssue
    s0_rsIdx := io.lsqOut.bits.rsIdx
    s0_sqIdx := io.lsqOut.bits.uop.sqIdx
  }.otherwise {
    if (EnableLoadToLoadForward) {
      tryFastpath := io.fastpath.valid
      // When there's no valid instruction from RS and LSQ, we try the load-to-load forwarding.
      s0_vaddr := io.fastpath.data
      // Assume the pointer chasing is always ld.
      s0_uop.ctrl.fuOpType := LSUOpType.ld
      s0_mask := genWmask(0.U, LSUOpType.ld)
      // we don't care about s0_isFirstIssue, s0_rsIdx and s0_sqIdx in S0 when trying pointer chasing
      // because these signals will be updated in S1
      s0_isFirstIssue := DontCare
      s0_rsIdx := DontCare
      s0_sqIdx := DontCare
    }
  }
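  // Worked example of the mask generated above, assuming genWmask builds a byte mask from the
  // two size bits and shifts it by the word offset vaddr(2, 0), as the mem utilities do:
  //   lw (fuOpType(1, 0) = "b10") at an address ending in 0x4: 0b1111 << 4 = 0b1111_0000
  //   ld (fuOpType(1, 0) = "b11") at an address ending in 0x0: 0b1111_1111
  // i.e. s0_mask marks which of the 8 bytes in the 64-bit access lane the load touches.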
  val addrAligned = LookupTree(s0_uop.ctrl.fuOpType(1, 0), List(
    "b00".U -> true.B,                   //b
    "b01".U -> (s0_vaddr(0)    === 0.U), //h
    "b10".U -> (s0_vaddr(1, 0) === 0.U), //w
    "b11".U -> (s0_vaddr(2, 0) === 0.U)  //d
  ))
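  // For example, an lh ("b01") at a vaddr ending in 0x3 has vaddr(0) =/= 0, so addrAligned
  // is false and loadAddrMisaligned is raised when io.out.bits is filled in below.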
  // io.in has highest priority
  io.in.ready := !io.in.valid || (io.out.ready && io.dcacheReq.ready)
  // io.lsqOut can fire only when there is no RS-issued load
  io.lsqOut.ready := (io.out.ready && io.dcacheReq.ready && !io.in.valid)

  val isSoftPrefetch = LSUOpType.isPrefetch(s0_uop.ctrl.fuOpType)
  val isSoftPrefetchRead = s0_uop.ctrl.fuOpType === LSUOpType.prefetch_r
  val isSoftPrefetchWrite = s0_uop.ctrl.fuOpType === LSUOpType.prefetch_w

  // query DTLB
  io.dtlbReq.valid := s0_valid
  io.dtlbReq.bits.vaddr := s0_vaddr
  io.dtlbReq.bits.cmd := TlbCmd.read
  io.dtlbReq.bits.size := LSUOpType.size(s0_uop.ctrl.fuOpType)
  io.dtlbReq.bits.kill := DontCare
  io.dtlbReq.bits.debug.robIdx := s0_uop.robIdx
  io.dtlbReq.bits.debug.pc := s0_uop.cf.pc
  io.dtlbReq.bits.debug.isFirstIssue := s0_isFirstIssue

  // query DCache
  io.dcacheReq.valid := s0_valid
  when (isSoftPrefetchRead) {
    io.dcacheReq.bits.cmd := MemoryOpConstants.M_PFR
  }.elsewhen (isSoftPrefetchWrite) {
    io.dcacheReq.bits.cmd := MemoryOpConstants.M_PFW
  }.otherwise {
    io.dcacheReq.bits.cmd := MemoryOpConstants.M_XRD
  }
  io.dcacheReq.bits.addr := s0_vaddr
  io.dcacheReq.bits.mask := s0_mask
  io.dcacheReq.bits.data := DontCare
  when(isSoftPrefetch) {
    io.dcacheReq.bits.instrtype := SOFT_PREFETCH.U
  }.otherwise {
    io.dcacheReq.bits.instrtype := LOAD_SOURCE.U
  }

  // TODO: update cache meta
  io.dcacheReq.bits.id := DontCare

  io.out.valid := s0_valid && io.dcacheReq.ready && !io.s0_kill

  io.out.bits := DontCare
  io.out.bits.vaddr := s0_vaddr
  io.out.bits.mask := s0_mask
  io.out.bits.uop := s0_uop
  io.out.bits.uop.cf.exceptionVec(loadAddrMisaligned) := !addrAligned
  io.out.bits.rsIdx := s0_rsIdx
  io.out.bits.isFirstIssue := s0_isFirstIssue
  io.out.bits.isSoftPrefetch := isSoftPrefetch
  io.out.bits.isLoadReplay := !io.in.valid && io.lsqOut.valid

  XSDebug(io.dcacheReq.fire,
    p"[DCACHE LOAD REQ] pc ${Hexadecimal(s0_uop.cf.pc)}, vaddr ${Hexadecimal(s0_vaddr)}\n"
  )
  XSPerfAccumulate("in_valid", io.in.valid)
  XSPerfAccumulate("in_fire", io.in.fire)
  XSPerfAccumulate("in_fire_first_issue", io.in.valid && io.isFirstIssue)
  XSPerfAccumulate("stall_out", io.out.valid && !io.out.ready && io.dcacheReq.ready)
  XSPerfAccumulate("stall_dcache", io.out.valid && io.out.ready && !io.dcacheReq.ready)
  XSPerfAccumulate("addr_spec_success", io.out.fire && s0_vaddr(VAddrBits-1, 12) === io.in.bits.src(0)(VAddrBits-1, 12))
  XSPerfAccumulate("addr_spec_failed", io.out.fire && s0_vaddr(VAddrBits-1, 12) =/= io.in.bits.src(0)(VAddrBits-1, 12))
  XSPerfAccumulate("addr_spec_success_once", io.out.fire && s0_vaddr(VAddrBits-1, 12) === io.in.bits.src(0)(VAddrBits-1, 12) && io.isFirstIssue)
  XSPerfAccumulate("addr_spec_failed_once", io.out.fire && s0_vaddr(VAddrBits-1, 12) =/= io.in.bits.src(0)(VAddrBits-1, 12) && io.isFirstIssue)
}

// Load Pipeline Stage 1
// TLB resp (send paddr to dcache)
class LoadUnit_S1(implicit p: Parameters) extends XSModule with HasCircularQueuePtrHelper {
  val io = IO(new Bundle() {
    val in = Flipped(Decoupled(new LsPipelineBundle))
    val s1_kill = Input(Bool())
    val out = Decoupled(new LsPipelineBundle)
    val dtlbResp = Flipped(DecoupledIO(new TlbResp(2)))
    val lsuPAddr = Output(UInt(PAddrBits.W))
    val dcachePAddr = Output(UInt(PAddrBits.W))
    val dcacheKill = Output(Bool())
    val dcacheBankConflict = Input(Bool())
    val fullForwardFast = Output(Bool())
    val sbuffer = new LoadForwardQueryIO
    val lsq = new PipeLoadForwardQueryIO
    val loadViolationQueryReq = Decoupled(new LoadViolationQueryReq)
    val reExecuteQuery = Flipped(Vec(StorePipelineWidth, Valid(new LoadReExecuteQueryIO)))
    val rsFeedback = ValidIO(new RSFeedback)
    val replayFast = new LoadToLsqFastIO
    val csrCtrl = Flipped(new CustomCSRCtrlIO)
    val needLdVioCheckRedo = Output(Bool())
    val needReExecute = Output(Bool())
  })

  val s1_uop = io.in.bits.uop
  val s1_paddr_dup_lsu = io.dtlbResp.bits.paddr(0)
  val s1_paddr_dup_dcache = io.dtlbResp.bits.paddr(1)
  // af & pf exceptions are modified below.
  val s1_exception = ExceptionNO.selectByFu(io.out.bits.uop.cf.exceptionVec, lduCfg).asUInt.orR
  val s1_tlb_miss = io.dtlbResp.bits.miss
  val s1_mask = io.in.bits.mask
  val s1_bank_conflict = io.dcacheBankConflict

  io.out.bits := io.in.bits // forwardXX fields will be updated in s1

  io.dtlbResp.ready := true.B

  io.lsuPAddr := s1_paddr_dup_lsu
  io.dcachePAddr := s1_paddr_dup_dcache
  //io.dcacheKill := s1_tlb_miss || s1_exception || s1_mmio
  io.dcacheKill := s1_tlb_miss || s1_exception || io.s1_kill
  // load forward query datapath
  io.sbuffer.valid := io.in.valid && !(s1_exception || s1_tlb_miss || io.s1_kill)
  io.sbuffer.vaddr := io.in.bits.vaddr
  io.sbuffer.paddr := s1_paddr_dup_lsu
  io.sbuffer.uop := s1_uop
  io.sbuffer.sqIdx := s1_uop.sqIdx
  io.sbuffer.mask := s1_mask
  io.sbuffer.pc := s1_uop.cf.pc // FIXME: remove it

  io.lsq.valid := io.in.valid && !(s1_exception || s1_tlb_miss || io.s1_kill)
  io.lsq.vaddr := io.in.bits.vaddr
  io.lsq.paddr := s1_paddr_dup_lsu
  io.lsq.uop := s1_uop
  io.lsq.sqIdx := s1_uop.sqIdx
  io.lsq.sqIdxMask := DontCare // will be overwritten by sqIdxMask pre-generated in s0
  io.lsq.mask := s1_mask
  io.lsq.pc := s1_uop.cf.pc // FIXME: remove it

  // ld-ld violation query
  io.loadViolationQueryReq.valid := io.in.valid && !(s1_exception || s1_tlb_miss || io.s1_kill)
  io.loadViolationQueryReq.bits.paddr := s1_paddr_dup_lsu
  io.loadViolationQueryReq.bits.uop := s1_uop

  // st-ld violation query
  val needReExecuteVec = Wire(Vec(StorePipelineWidth, Bool()))
  val needReExecute = Wire(Bool())

  for (w <- 0 until StorePipelineWidth) {
    // needReExecute is valid when
    // 1. the re-execute query request is valid,
    // 2. the load instruction is younger than the requestors (store instructions),
    // 3. the physical addresses match, and
    // 4. the data overlaps (masks intersect).
    needReExecuteVec(w) := io.reExecuteQuery(w).valid &&
      isAfter(io.in.bits.uop.robIdx, io.reExecuteQuery(w).bits.robIdx) &&
      !s1_tlb_miss &&
      (s1_paddr_dup_lsu(PAddrBits-1, 3) === io.reExecuteQuery(w).bits.paddr(PAddrBits-1, 3)) &&
      (s1_mask & io.reExecuteQuery(w).bits.mask).orR
  }
  needReExecute := needReExecuteVec.asUInt.orR
  io.needReExecute := needReExecute
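  // Example of the overlap test above: paddrs are compared at 8-byte granularity
  // (bits PAddrBits-1 downto 3), so a load with mask 0b0000_1111 (bytes 0-3) and a store
  // with mask 0b0011_0000 (bytes 4-5) in the same 8-byte word do NOT conflict:
  // (0b0000_1111 & 0b0011_0000) = 0, and needReExecuteVec(w) stays false.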
  // Generate forwardMaskFast to wake up insts earlier
  val forwardMaskFast = io.lsq.forwardMaskFast.asUInt | io.sbuffer.forwardMaskFast.asUInt
  io.fullForwardFast := ((~forwardMaskFast).asUInt & s1_mask) === 0.U
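  // Example: s1_mask = 0b0000_1111 with forwardMaskFast = 0b0000_0111 gives
  // (~forwardMaskFast & s1_mask) = 0b0000_1000 =/= 0, so byte 3 is still missing and
  // io.fullForwardFast stays low; it only goes high when every requested byte is covered.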
  // Generate feedback signal caused by:
  // * dcache bank conflict
  // * need redo ld-ld violation check
  val needLdVioCheckRedo = io.loadViolationQueryReq.valid &&
    !io.loadViolationQueryReq.ready &&
    RegNext(io.csrCtrl.ldld_vio_check_enable)
  io.needLdVioCheckRedo := needLdVioCheckRedo
  // io.rsFeedback.valid := io.in.valid && (s1_bank_conflict || needLdVioCheckRedo) && !io.s1_kill
  io.rsFeedback.valid := Mux(io.in.bits.isLoadReplay, false.B, io.in.valid && !io.s1_kill)
  io.rsFeedback.bits.hit := true.B // we have found s1_bank_conflict / redo ld-ld violation check
  io.rsFeedback.bits.rsIdx := io.in.bits.rsIdx
  io.rsFeedback.bits.flushState := io.in.bits.ptwBack
  io.rsFeedback.bits.sourceType := Mux(s1_bank_conflict, RSFeedbackType.bankConflict, RSFeedbackType.ldVioCheckRedo)
  io.rsFeedback.bits.dataInvalidSqIdx := DontCare

  io.replayFast.valid := io.in.valid && !io.s1_kill
  io.replayFast.ld_ld_check_ok := !needLdVioCheckRedo
  io.replayFast.st_ld_check_ok := !needReExecute
  io.replayFast.cache_bank_no_conflict := !s1_bank_conflict
  io.replayFast.ld_idx := io.in.bits.uop.lqIdx.value

  // if replay is detected in load_s1,
  // the load inst will be canceled immediately
  io.out.valid := io.in.valid && (!needLdVioCheckRedo && !s1_bank_conflict && !needReExecute) && !io.s1_kill
  io.out.bits.paddr := s1_paddr_dup_lsu
  io.out.bits.tlbMiss := s1_tlb_miss

  // the current ori test will cause the case of ldest == 0; the code below will be modified in the future.
  // af & pf exceptions were modified
  io.out.bits.uop.cf.exceptionVec(loadPageFault) := io.dtlbResp.bits.excp(0).pf.ld
  io.out.bits.uop.cf.exceptionVec(loadAccessFault) := io.dtlbResp.bits.excp(0).af.ld

  io.out.bits.ptwBack := io.dtlbResp.bits.ptwBack
  io.out.bits.rsIdx := io.in.bits.rsIdx

  io.out.bits.isSoftPrefetch := io.in.bits.isSoftPrefetch

  io.in.ready := !io.in.valid || io.out.ready

  XSPerfAccumulate("in_valid", io.in.valid)
  XSPerfAccumulate("in_fire", io.in.fire)
  XSPerfAccumulate("in_fire_first_issue", io.in.fire && io.in.bits.isFirstIssue)
  XSPerfAccumulate("tlb_miss", io.in.fire && s1_tlb_miss)
  XSPerfAccumulate("tlb_miss_first_issue", io.in.fire && s1_tlb_miss && io.in.bits.isFirstIssue)
  XSPerfAccumulate("stall_out", io.out.valid && !io.out.ready)
}

// Load Pipeline Stage 2
// DCache resp
class LoadUnit_S2(implicit p: Parameters) extends XSModule with HasLoadHelper with HasCircularQueuePtrHelper {
  val io = IO(new Bundle() {
    val in = Flipped(Decoupled(new LsPipelineBundle))
    val out = Decoupled(new LsPipelineBundle)
    val rsFeedback = ValidIO(new RSFeedback)
    val replaySlow = new LoadToLsqSlowIO
    val dcacheResp = Flipped(DecoupledIO(new BankedDCacheWordResp))
    val pmpResp = Flipped(new PMPRespBundle())
    val lsq = new LoadForwardQueryIO
    val dataInvalidSqIdx = Input(UInt())
    val sbuffer = new LoadForwardQueryIO
    val dataForwarded = Output(Bool())
    val s2_dcache_require_replay = Output(Bool())
    val fullForward = Output(Bool())
    val dcache_kill = Output(Bool())
    val s3_delayed_load_error = Output(Bool())
    val loadViolationQueryResp = Flipped(Valid(new LoadViolationQueryResp))
    val csrCtrl = Flipped(new CustomCSRCtrlIO)
    val sentFastUop = Input(Bool())
    val static_pm = Input(Valid(Bool())) // valid for static, bits for mmio
    val s2_can_replay_from_fetch = Output(Bool()) // dirty code
    val loadDataFromDcache = Output(new LoadDataFromDcacheBundle)
    val reExecuteQuery = Flipped(Vec(StorePipelineWidth, Valid(new LoadReExecuteQueryIO)))
    val needReExecute = Output(Bool())
    // val write_lq_safe = Output(Bool()) // used by duplicated wen signals
  })

  val pmp = WireInit(io.pmpResp)
  when (io.static_pm.valid) {
    pmp.ld := false.B
    pmp.st := false.B
    pmp.instr := false.B
    pmp.mmio := io.static_pm.bits
  }

  val s2_is_prefetch = io.in.bits.isSoftPrefetch

  // exceptions that may cause the load addr to be invalid / illegal
  //
  // if such an exception happens, the inst and its exception info
  // will be force-written-back to the ROB
  val s2_exception_vec = WireInit(io.in.bits.uop.cf.exceptionVec)
  s2_exception_vec(loadAccessFault) := io.in.bits.uop.cf.exceptionVec(loadAccessFault) || pmp.ld
  // soft prefetch will not trigger any exception (but an ecc error interrupt may be triggered)
  when (s2_is_prefetch) {
    s2_exception_vec := 0.U.asTypeOf(s2_exception_vec.cloneType)
  }
  val s2_exception = ExceptionNO.selectByFu(s2_exception_vec, lduCfg).asUInt.orR && !io.in.bits.tlbMiss // ????????
  // writeback access fault caused by ecc error / bus error
  //
  // * ecc data error is slow to generate, so we will not use it until load stage 3
  // * in load stage 3, an extra signal io.load_error will be used to report it

  // now a cache ecc error will raise an access fault
  // at the same time, error info (including the error paddr) will be written to
  // a customized CSR "CACHE_ERROR"
  if (EnableAccurateLoadError) {
    io.s3_delayed_load_error := io.dcacheResp.bits.error_delayed &&
      io.csrCtrl.cache_error_enable &&
      RegNext(io.out.valid)
  } else {
    io.s3_delayed_load_error := false.B
  }

  val actually_mmio = pmp.mmio
  val s2_uop = io.in.bits.uop
  val s2_mask = io.in.bits.mask
  val s2_paddr = io.in.bits.paddr
  val s2_tlb_miss = io.in.bits.tlbMiss
  val s2_mmio = !s2_is_prefetch && actually_mmio && !s2_exception
  val s2_cache_miss = io.dcacheResp.bits.miss
  val s2_cache_replay = io.dcacheResp.bits.replay
  val s2_cache_tag_error = io.dcacheResp.bits.tag_error
  val s2_forward_fail = io.lsq.matchInvalid || io.sbuffer.matchInvalid
  val s2_ldld_violation = io.loadViolationQueryResp.valid &&
    io.loadViolationQueryResp.bits.have_violation &&
    RegNext(io.csrCtrl.ldld_vio_check_enable)
  val s2_data_invalid = io.lsq.dataInvalid && !s2_ldld_violation && !s2_exception

  io.dcache_kill := pmp.ld || pmp.mmio // move pmp resp kill to outside
  io.dcacheResp.ready := true.B
  val dcacheShouldResp = !(s2_tlb_miss || s2_exception || s2_mmio || s2_is_prefetch)
  assert(!(io.in.valid && (dcacheShouldResp && !io.dcacheResp.valid)), "DCache response got lost")

  // merge forward result
  // lsq has higher priority than sbuffer
  val forwardMask = Wire(Vec(8, Bool()))
  val forwardData = Wire(Vec(8, UInt(8.W)))

  val fullForward = ((~forwardMask.asUInt).asUInt & s2_mask) === 0.U && !io.lsq.dataInvalid
  io.lsq := DontCare
  io.sbuffer := DontCare
  io.fullForward := fullForward

  // generate XLEN/8 Muxes
  for (i <- 0 until XLEN / 8) {
    forwardMask(i) := io.lsq.forwardMask(i) || io.sbuffer.forwardMask(i)
    forwardData(i) := Mux(io.lsq.forwardMask(i), io.lsq.forwardData(i), io.sbuffer.forwardData(i))
  }
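  // Byte-granular merge example: if the lsq forwards byte 0 and the sbuffer forwards
  // bytes 0 and 1, then forwardMask = 0b0000_0011, forwardData(0) comes from the lsq
  // (the in-flight store is younger than anything in the sbuffer) and forwardData(1)
  // comes from the sbuffer.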
  XSDebug(io.out.fire, "[FWD LOAD RESP] pc %x fwd %x(%b) + %x(%b)\n",
    s2_uop.cf.pc,
    io.lsq.forwardData.asUInt, io.lsq.forwardMask.asUInt,
    io.in.bits.forwardData.asUInt, io.in.bits.forwardMask.asUInt
  )

  // data merge
  // val rdataVec = VecInit((0 until XLEN / 8).map(j =>
  //   Mux(forwardMask(j), forwardData(j), io.dcacheResp.bits.data(8*(j+1)-1, 8*j))
  // )) // s2_rdataVec will be written to the load queue
  // val rdata = rdataVec.asUInt
  // val rdataSel = LookupTree(s2_paddr(2, 0), List(
  //   "b000".U -> rdata(63, 0),
  //   "b001".U -> rdata(63, 8),
  //   "b010".U -> rdata(63, 16),
  //   "b011".U -> rdata(63, 24),
  //   "b100".U -> rdata(63, 32),
  //   "b101".U -> rdata(63, 40),
  //   "b110".U -> rdata(63, 48),
  //   "b111".U -> rdata(63, 56)
  // ))
  // val rdataPartialLoad = rdataHelper(s2_uop, rdataSel) // s2_rdataPartialLoad is not used

  io.out.valid := io.in.valid && !s2_tlb_miss && !s2_data_invalid && !io.needReExecute
  // write_lq_safe is needed by dup logic
  // io.write_lq_safe := !s2_tlb_miss && !s2_data_invalid
  // Inst will be canceled in store queue / lsq,
  // so we do not need to care about flush in load / store unit's out.valid
  io.out.bits := io.in.bits
  // io.out.bits.data := rdataPartialLoad
  io.out.bits.data := 0.U // data will be generated in load_s3
  // when an exception occurs, set it to not miss and let it write back to rob (via int port)
  if (EnableFastForward) {
    io.out.bits.miss := s2_cache_miss &&
      !s2_exception &&
      !fullForward &&
      !s2_is_prefetch
  } else {
    io.out.bits.miss := s2_cache_miss &&
      !s2_exception &&
      !s2_is_prefetch
  }
  io.out.bits.uop.ctrl.fpWen := io.in.bits.uop.ctrl.fpWen && !s2_exception

  io.loadDataFromDcache.bankedDcacheData := io.dcacheResp.bits.bank_data
  io.loadDataFromDcache.bank_oh := io.dcacheResp.bits.bank_oh
  // io.loadDataFromDcache.dcacheData := io.dcacheResp.bits.data
  io.loadDataFromDcache.forwardMask := forwardMask
  io.loadDataFromDcache.forwardData := forwardData
  io.loadDataFromDcache.uop := io.out.bits.uop
  io.loadDataFromDcache.addrOffset := s2_paddr(2, 0)

  io.s2_can_replay_from_fetch := !s2_mmio && !s2_is_prefetch && !s2_tlb_miss
  // if forwarding fails, replay this inst from fetch
  val debug_forwardFailReplay = s2_forward_fail && !s2_mmio && !s2_is_prefetch && !s2_tlb_miss
  // if an ld-ld violation is detected, replay this inst from fetch
  val debug_ldldVioReplay = s2_ldld_violation && !s2_mmio && !s2_is_prefetch && !s2_tlb_miss
  // io.out.bits.uop.ctrl.replayInst := false.B

  io.out.bits.mmio := s2_mmio
  io.out.bits.uop.ctrl.flushPipe := s2_mmio && io.sentFastUop
  io.out.bits.uop.cf.exceptionVec := s2_exception_vec // cache error not included

  // For timing reasons, sometimes we can not let
  // io.out.bits.miss := s2_cache_miss && !s2_exception && !fullForward
  // We use io.dataForwarded instead. It means:
  // 1. Forwarding logic has prepared all the data needed,
  //    and the dcache query is no longer needed.
  // 2. ... or a data cache tag error is detected; this kind of inst
  //    will not update the miss queue. That is to say, if it misses, that inst
  //    may not be refilled.
  // Such an inst will be written back from the load queue.
  io.dataForwarded := s2_cache_miss && !s2_exception &&
    (fullForward || io.csrCtrl.cache_error_enable && s2_cache_tag_error)
  // io.out.bits.forwardX will be sent to lq
  io.out.bits.forwardMask := forwardMask
  // data from dcache is not included in io.out.bits.forwardData
  io.out.bits.forwardData := forwardData

  io.in.ready := io.out.ready || !io.in.valid

  // st-ld violation query
  val needReExecuteVec = Wire(Vec(StorePipelineWidth, Bool()))
  val needReExecute = Wire(Bool())

  for (i <- 0 until StorePipelineWidth) {
    // needFastRecovery is valid when
    // 1. the fast recovery query request is valid,
    // 2. the load instruction is younger than the requestors (store instructions),
    // 3. the physical addresses match, and
    // 4. the data overlaps (masks intersect).
    needReExecuteVec(i) := io.reExecuteQuery(i).valid &&
      isAfter(io.in.bits.uop.robIdx, io.reExecuteQuery(i).bits.robIdx) &&
      !s2_tlb_miss &&
      (s2_paddr(PAddrBits-1,3) === io.reExecuteQuery(i).bits.paddr(PAddrBits-1, 3)) &&
      (s2_mask & io.reExecuteQuery(i).bits.mask).orR
  }
  needReExecute := needReExecuteVec.asUInt.orR
  io.needReExecute := needReExecute

  // feedback tlb result to RS
  io.rsFeedback.valid := false.B
  val s2_need_replay_from_rs = Wire(Bool())
  if (EnableFastForward) {
    s2_need_replay_from_rs :=
      needReExecute ||
      s2_tlb_miss || // replay if dtlb miss
      s2_cache_replay && !s2_is_prefetch && !s2_mmio && !s2_exception && !fullForward || // replay if dcache miss queue full / busy
      s2_data_invalid && !s2_is_prefetch // replay if store to load forward data is not ready
  } else {
    // Note that if all parts of the data are available in sq / sbuffer, a replay required by dcache will not be scheduled
    s2_need_replay_from_rs :=
      needReExecute ||
      s2_tlb_miss || // replay if dtlb miss
      s2_cache_replay && !s2_is_prefetch && !s2_mmio && !s2_exception && !io.dataForwarded || // replay if dcache miss queue full / busy
      s2_data_invalid && !s2_is_prefetch // replay if store to load forward data is not ready
  }
  io.rsFeedback.bits.hit := !s2_need_replay_from_rs
  io.rsFeedback.bits.rsIdx := io.in.bits.rsIdx
  io.rsFeedback.bits.flushState := io.in.bits.ptwBack
  // feedback source priority: tlbMiss > dataInvalid > mshrFull
  // general case priority: tlbMiss > exception (include forward_fail / ldld_violation) > mmio > dataInvalid > mshrFull > normal miss / hit
  io.rsFeedback.bits.sourceType := Mux(s2_tlb_miss, RSFeedbackType.tlbMiss,
    Mux(s2_data_invalid,
      RSFeedbackType.dataInvalid,
      RSFeedbackType.mshrFull
    )
  )
  io.rsFeedback.bits.dataInvalidSqIdx.value := io.dataInvalidSqIdx
  io.rsFeedback.bits.dataInvalidSqIdx.flag := DontCare

  io.replaySlow.valid := io.in.valid
  io.replaySlow.tlb_hited := !s2_tlb_miss
  io.replaySlow.st_ld_check_ok := !needReExecute
  if (EnableFastForward) {
    io.replaySlow.cache_no_replay := !s2_cache_replay || s2_is_prefetch || s2_mmio || s2_exception || fullForward
  } else {
    io.replaySlow.cache_no_replay := !s2_cache_replay || s2_is_prefetch || s2_mmio || s2_exception || io.dataForwarded
  }
  io.replaySlow.forward_data_valid := !s2_data_invalid || s2_is_prefetch
  io.replaySlow.ld_idx := io.in.bits.uop.lqIdx.value
  io.replaySlow.data_invalid_sq_idx := io.dataInvalidSqIdx
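  // A minimal sketch (an assumption about the LSQ side, not code from this file) of how the
  // slow-path flags could be consumed: the entry at ld_idx is re-scheduled iff any check
  // failed, and data_invalid_sq_idx tells the LSQ which store to wait on before waking up.
  //
  //   val slowNeedReplay = slow.valid &&
  //     !(slow.tlb_hited && slow.st_ld_check_ok && slow.cache_no_replay && slow.forward_data_valid)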
  // s2_cache_replay is quite slow to generate, send it separately to LQ
  if (EnableFastForward) {
    io.s2_dcache_require_replay := s2_cache_replay && !fullForward
  } else {
    io.s2_dcache_require_replay := s2_cache_replay &&
      s2_need_replay_from_rs &&
      !io.dataForwarded &&
      !s2_is_prefetch &&
      io.out.bits.miss
  }

  XSPerfAccumulate("in_valid", io.in.valid)
  XSPerfAccumulate("in_fire", io.in.fire)
  XSPerfAccumulate("in_fire_first_issue", io.in.fire && io.in.bits.isFirstIssue)
  XSPerfAccumulate("dcache_miss", io.in.fire && s2_cache_miss)
  XSPerfAccumulate("dcache_miss_first_issue", io.in.fire && s2_cache_miss && io.in.bits.isFirstIssue)
  XSPerfAccumulate("full_forward", io.in.valid && fullForward)
  XSPerfAccumulate("dcache_miss_full_forward", io.in.valid && s2_cache_miss && fullForward)
  XSPerfAccumulate("replay", io.rsFeedback.valid && !io.rsFeedback.bits.hit)
  XSPerfAccumulate("replay_tlb_miss", io.rsFeedback.valid && !io.rsFeedback.bits.hit && s2_tlb_miss)
  XSPerfAccumulate("replay_cache", io.rsFeedback.valid && !io.rsFeedback.bits.hit && !s2_tlb_miss && s2_cache_replay)
  XSPerfAccumulate("stall_out", io.out.valid && !io.out.ready)
  XSPerfAccumulate("replay_from_fetch_forward", io.out.valid && debug_forwardFailReplay)
  XSPerfAccumulate("replay_from_fetch_load_vio", io.out.valid && debug_ldldVioReplay)

  XSPerfAccumulate("replay_lq", io.replaySlow.valid && (!io.replaySlow.tlb_hited || !io.replaySlow.cache_no_replay || !io.replaySlow.forward_data_valid))
  XSPerfAccumulate("replay_tlb_miss_lq", io.replaySlow.valid && !io.replaySlow.tlb_hited)
  XSPerfAccumulate("replay_sl_vio", io.replaySlow.valid && io.replaySlow.tlb_hited && !io.replaySlow.st_ld_check_ok)
  XSPerfAccumulate("replay_cache_lq", io.replaySlow.valid && io.replaySlow.tlb_hited && io.replaySlow.st_ld_check_ok && !io.replaySlow.cache_no_replay)
}
class LoadUnit(implicit p: Parameters) extends XSModule
  with HasLoadHelper
  with HasPerfEvents
  with HasDCacheParameters
{
  val io = IO(new Bundle() {
    val ldin = Flipped(Decoupled(new ExuInput))
    val ldout = Decoupled(new ExuOutput)
    val redirect = Flipped(ValidIO(new Redirect))
    val feedbackSlow = ValidIO(new RSFeedback)
    val feedbackFast = ValidIO(new RSFeedback)
    val rsIdx = Input(UInt(log2Up(IssQueSize).W))
    val isFirstIssue = Input(Bool())
    val dcache = new DCacheLoadIO
    val sbuffer = new LoadForwardQueryIO
    val lsq = new LoadToLsqIO
    val refill = Flipped(ValidIO(new Refill))
    val fastUop = ValidIO(new MicroOp) // early wakeup signal generated in load_s1, sent to RS in load_s2
    val trigger = Vec(3, new LoadUnitTriggerIO)

    val tlb = new TlbRequestIO(2)
    val pmp = Flipped(new PMPRespBundle()) // arrives at the same time as the tlb resp now

    val fastpathOut = Output(new LoadToLoadIO)
    val fastpathIn = Input(new LoadToLoadIO)
    val loadFastMatch = Input(Bool())
    val loadFastImm = Input(UInt(12.W))

    val s3_delayed_load_error = Output(Bool()) // load ecc error
    // Note that io.s3_delayed_load_error and io.lsq.s3_delayed_load_error are different

    val csrCtrl = Flipped(new CustomCSRCtrlIO)
    val reExecuteQuery = Flipped(Vec(StorePipelineWidth, Valid(new LoadReExecuteQueryIO))) // load replay
    val lsqOut = Flipped(Decoupled(new LsPipelineBundle))
  })

  val load_s0 = Module(new LoadUnit_S0)
  val load_s1 = Module(new LoadUnit_S1)
  val load_s2 = Module(new LoadUnit_S2)

  load_s0.io.lsqOut <> io.lsqOut

  // load s0
  load_s0.io.in <> io.ldin
  load_s0.io.dtlbReq <> io.tlb.req
  load_s0.io.dcacheReq <> io.dcache.req
  load_s0.io.rsIdx := io.rsIdx
  load_s0.io.isFirstIssue := io.isFirstIssue
  load_s0.io.s0_kill := false.B
  // we try pointer chasing when (1) there is no RS-issued load and (2) there is no LSQ-replayed load
  val s0_tryPointerChasing = !io.ldin.valid && !io.lsqOut.valid && io.fastpathIn.valid
  val s0_pointerChasingVAddr = io.fastpathIn.data(5, 0) +& io.loadFastImm(5, 0)
  load_s0.io.fastpath.valid := io.fastpathIn.valid
  load_s0.io.fastpath.data := Cat(io.fastpathIn.data(XLEN-1, 6), s0_pointerChasingVAddr(5,0))
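  // Example of the speculative 6-bit add above: only vaddr(5, 0), the offset inside a 64 B
  // cache line, is added in time for S0; "+&" keeps the carry, so s0_pointerChasingVAddr(6)
  // set means base + offset crossed into another cache set. E.g. data(5, 0) = 0x38 and
  // imm(5, 0) = 0x10 give 0x48, whose bit 6 is set; that carry is caught in S1 as
  // addressMisMatch and the chase is cancelled.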
  val s1_data = PipelineConnect(load_s0.io.out, load_s1.io.in, true.B,
    load_s0.io.out.bits.uop.robIdx.needFlush(io.redirect) && !s0_tryPointerChasing).get

  // load s1
  // update s1_kill when any source has a valid request
  load_s1.io.s1_kill := RegEnable(load_s0.io.s0_kill, false.B, io.ldin.valid || io.lsqOut.valid || io.fastpathIn.valid)
  io.tlb.req_kill := load_s1.io.s1_kill
  load_s1.io.dtlbResp <> io.tlb.resp
  io.dcache.s1_paddr_dup_lsu <> load_s1.io.lsuPAddr
  io.dcache.s1_paddr_dup_dcache <> load_s1.io.dcachePAddr
  io.dcache.s1_kill := load_s1.io.dcacheKill
  load_s1.io.sbuffer <> io.sbuffer
  load_s1.io.lsq <> io.lsq.forward
  load_s1.io.loadViolationQueryReq <> io.lsq.loadViolationQuery.req
  load_s1.io.dcacheBankConflict <> io.dcache.s1_bank_conflict
  load_s1.io.csrCtrl <> io.csrCtrl
  load_s1.io.reExecuteQuery := io.reExecuteQuery
  // provide paddr and vaddr for lq
  io.lsq.loadPaddrIn.valid := load_s1.io.out.valid
  io.lsq.loadPaddrIn.bits.lqIdx := load_s1.io.out.bits.uop.lqIdx
  io.lsq.loadPaddrIn.bits.paddr := load_s1.io.lsuPAddr

  io.lsq.loadVaddrIn.valid := load_s1.io.in.valid && !load_s1.io.s1_kill
  io.lsq.loadVaddrIn.bits.lqIdx := load_s1.io.out.bits.uop.lqIdx
  io.lsq.loadVaddrIn.bits.vaddr := load_s1.io.out.bits.vaddr

  // when S0 has the opportunity to try pointer chasing, make sure it truly goes to S1,
  // i.e. S0's out is ready and dcache is ready
  val s0_doTryPointerChasing = s0_tryPointerChasing && load_s0.io.out.ready && load_s0.io.dcacheReq.ready
  val s1_tryPointerChasing = RegNext(s0_doTryPointerChasing, false.B)
  val s1_pointerChasingVAddr = RegEnable(s0_pointerChasingVAddr, s0_doTryPointerChasing)
  val cancelPointerChasing = WireInit(false.B)
  if (EnableLoadToLoadForward) {
    // Sometimes, we need to cancel the load-load forwarding.
    // These checks can be moved to S0 if timing is bad at S1.
    // Case 0: CACHE_SET(base + offset) != CACHE_SET(base) (the lowest 6-bit addition has an overflow)
    val addressMisMatch = s1_pointerChasingVAddr(6) || RegEnable(io.loadFastImm(11, 6).orR, s0_doTryPointerChasing)
    // Case 1: the address is not 64-bit aligned or the fuOpType is not LD
    val addressNotAligned = s1_pointerChasingVAddr(2, 0).orR
    val fuOpTypeIsNotLd = io.ldin.bits.uop.ctrl.fuOpType =/= LSUOpType.ld
    // Case 2: this is not a valid load-load pair
    val notFastMatch = RegEnable(!io.loadFastMatch, s0_tryPointerChasing)
    // Case 3: this load-load uop is cancelled
    val isCancelled = !io.ldin.valid
    when (s1_tryPointerChasing) {
      cancelPointerChasing := addressMisMatch || addressNotAligned || fuOpTypeIsNotLd || notFastMatch || isCancelled
      load_s1.io.in.bits.uop := io.ldin.bits.uop
      val spec_vaddr = s1_data.vaddr
      val vaddr = Cat(spec_vaddr(VAddrBits - 1, 6), s1_pointerChasingVAddr(5, 3), 0.U(3.W))
      load_s1.io.in.bits.vaddr := vaddr
      load_s1.io.in.bits.rsIdx := io.rsIdx
      load_s1.io.in.bits.isFirstIssue := io.isFirstIssue
      // We need to replace vaddr(5, 3).
      val spec_paddr = io.tlb.resp.bits.paddr(0)
      load_s1.io.dtlbResp.bits.paddr.foreach(_ := Cat(spec_paddr(PAddrBits - 1, 6), s1_pointerChasingVAddr(5, 3), 0.U(3.W)))
    }
    when (cancelPointerChasing) {
      load_s1.io.s1_kill := true.B
    }.otherwise {
      load_s0.io.s0_kill := s1_tryPointerChasing
      when (s1_tryPointerChasing) {
        io.ldin.ready := true.B
      }
    }

    XSPerfAccumulate("load_to_load_forward", s1_tryPointerChasing && !cancelPointerChasing)
    XSPerfAccumulate("load_to_load_forward_try", s1_tryPointerChasing)
    XSPerfAccumulate("load_to_load_forward_fail", cancelPointerChasing)
    XSPerfAccumulate("load_to_load_forward_fail_cancelled", cancelPointerChasing && isCancelled)
    XSPerfAccumulate("load_to_load_forward_fail_wakeup_mismatch", cancelPointerChasing && !isCancelled && notFastMatch)
    XSPerfAccumulate("load_to_load_forward_fail_op_not_ld",
      cancelPointerChasing && !isCancelled && !notFastMatch && fuOpTypeIsNotLd)
    XSPerfAccumulate("load_to_load_forward_fail_addr_align",
      cancelPointerChasing && !isCancelled && !notFastMatch && !fuOpTypeIsNotLd && addressNotAligned)
    XSPerfAccumulate("load_to_load_forward_fail_set_mismatch",
      cancelPointerChasing && !isCancelled && !notFastMatch && !fuOpTypeIsNotLd && !addressNotAligned && addressMisMatch)
  }
  PipelineConnect(load_s1.io.out, load_s2.io.in, true.B,
    load_s1.io.out.bits.uop.robIdx.needFlush(io.redirect) || cancelPointerChasing)

  // load s2
  io.dcache.s2_kill := load_s2.io.dcache_kill // to kill mmio resp which are redirected
  load_s2.io.dcacheResp <> io.dcache.resp
  load_s2.io.pmpResp <> io.pmp
  load_s2.io.static_pm := RegNext(io.tlb.resp.bits.static_pm)
  load_s2.io.lsq.forwardData <> io.lsq.forward.forwardData
  load_s2.io.lsq.forwardMask <> io.lsq.forward.forwardMask
  load_s2.io.lsq.forwardMaskFast <> io.lsq.forward.forwardMaskFast // should not be used in load_s2
  load_s2.io.lsq.dataInvalid <> io.lsq.forward.dataInvalid
  load_s2.io.lsq.matchInvalid <> io.lsq.forward.matchInvalid
  load_s2.io.sbuffer.forwardData <> io.sbuffer.forwardData
  load_s2.io.sbuffer.forwardMask <> io.sbuffer.forwardMask
  load_s2.io.sbuffer.forwardMaskFast <> io.sbuffer.forwardMaskFast // should not be used in load_s2
  load_s2.io.sbuffer.dataInvalid <> io.sbuffer.dataInvalid // always false
  load_s2.io.sbuffer.matchInvalid <> io.sbuffer.matchInvalid
  load_s2.io.dataForwarded <> io.lsq.s2_load_data_forwarded
  load_s2.io.dataInvalidSqIdx := io.lsq.forward.dataInvalidSqIdx // provide dataInvalidSqIdx to make wakeup faster
  load_s2.io.loadViolationQueryResp <> io.lsq.loadViolationQuery.resp
  load_s2.io.csrCtrl <> io.csrCtrl
  load_s2.io.sentFastUop := io.fastUop.valid
  load_s2.io.reExecuteQuery := io.reExecuteQuery
  // feedback bank conflict / ld-vio check struct hazard to rs
  io.feedbackFast.bits := RegNext(load_s1.io.rsFeedback.bits)
  io.feedbackFast.valid := RegNext(load_s1.io.rsFeedback.valid && !load_s1.io.out.bits.uop.robIdx.needFlush(io.redirect))
  // pre-calculate sqIdx mask in s0, then send it to lsq in s1 for forwarding
  val sqIdxMaskReg = RegNext(UIntToMask(load_s0.io.s0_sqIdx.value, StoreQueueSize))
  // to enable load-load, sqIdxMask must be calculated based on ldin.uop
  // If the timing here is not OK, load-load forwarding has to be disabled.
  // Or we calculate sqIdxMask at RS??
  io.lsq.forward.sqIdxMask := sqIdxMaskReg
  if (EnableLoadToLoadForward) {
    when (s1_tryPointerChasing) {
      io.lsq.forward.sqIdxMask := UIntToMask(io.ldin.bits.uop.sqIdx.value, StoreQueueSize)
    }
  }
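  // Worked example, assuming UIntToMask(ptr, size) sets the ptr lowest bits, as the version
  // in utils does: UIntToMask(3.U, 8) = 0b0000_0111, i.e. store queue entries 0..2 are the
  // ones older than this load, and they are the only entries the forwarding logic needs to
  // consider (queue wrap-around is handled with the sqIdx flag on the LSQ side).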
  // // use s2_hit_way to select data received in s1
  // load_s2.io.dcacheResp.bits.data := Mux1H(RegNext(io.dcache.s1_hit_way), RegNext(io.dcache.s1_data))
  // assert(load_s2.io.dcacheResp.bits.data === io.dcache.resp.bits.data)

  // now io.fastUop.valid is sent to RS in load_s2
  val s2_dcache_hit = io.dcache.s2_hit // dcache hit dup in lsu side

  io.fastUop.valid := RegNext(
    !io.dcache.s1_disable_fast_wakeup && // load fast wakeup should be disabled when dcache data read is not ready
    load_s1.io.in.valid && // valid load request
    !load_s1.io.s1_kill && // killed by load-load forwarding
    !load_s1.io.dtlbResp.bits.fast_miss && // not mmio or tlb miss, pf / af not included here
    !io.lsq.forward.dataInvalidFast // forward failed
  ) &&
  !RegNext(load_s1.io.needLdVioCheckRedo) && // load-load violation check: load paddr cam struct hazard
  !RegNext(load_s1.io.needReExecute) &&
  !RegNext(load_s1.io.out.bits.uop.robIdx.needFlush(io.redirect)) &&
  (load_s2.io.in.valid && !load_s2.io.needReExecute && s2_dcache_hit) // dcache hit in lsu side

  io.fastUop.bits := RegNext(load_s1.io.out.bits.uop)

  XSDebug(load_s0.io.out.valid,
    p"S0: pc ${Hexadecimal(load_s0.io.out.bits.uop.cf.pc)}, lId ${Hexadecimal(load_s0.io.out.bits.uop.lqIdx.asUInt)}, " +
    p"vaddr ${Hexadecimal(load_s0.io.out.bits.vaddr)}, mask ${Hexadecimal(load_s0.io.out.bits.mask)}\n")
  XSDebug(load_s1.io.out.valid,
    p"S1: pc ${Hexadecimal(load_s1.io.out.bits.uop.cf.pc)}, lId ${Hexadecimal(load_s1.io.out.bits.uop.lqIdx.asUInt)}, tlb_miss ${io.tlb.resp.bits.miss}, " +
    p"paddr ${Hexadecimal(load_s1.io.out.bits.paddr)}, mmio ${load_s1.io.out.bits.mmio}\n")

  // writeback to LSQ
  // Current dcache uses MSHRs
  // Load queue will be updated at s2 for both hit/miss int/fp load
  io.lsq.loadIn.valid := load_s2.io.out.valid
  // generate LqWriteBundle from LsPipelineBundle
  io.lsq.loadIn.bits.fromLsPipelineBundle(load_s2.io.out.bits)

  io.lsq.replayFast := load_s1.io.replayFast
  io.lsq.replaySlow := load_s2.io.replaySlow
  io.lsq.replaySlow.valid := load_s2.io.replaySlow.valid && !load_s2.io.out.bits.uop.robIdx.needFlush(io.redirect)

  // generate duplicated load queue data wen
  val load_s2_valid_vec = RegInit(0.U(6.W))
  val load_s2_leftFire = load_s1.io.out.valid && load_s2.io.in.ready
  // val write_lq_safe = load_s2.io.write_lq_safe
  load_s2_valid_vec := 0x0.U(6.W)
  when (load_s2_leftFire) { load_s2_valid_vec := 0x3f.U(6.W) }
  when (load_s1.io.out.bits.uop.robIdx.needFlush(io.redirect)) { load_s2_valid_vec := 0x0.U(6.W) }
  assert(RegNext(load_s2.io.in.valid === load_s2_valid_vec(0)))
  io.lsq.loadIn.bits.lq_data_wen_dup := load_s2_valid_vec.asBools()

  // s2_dcache_require_replay signal will be RegNexted, then used in s3
  io.lsq.s2_dcache_require_replay := load_s2.io.s2_dcache_require_replay

  // write to rob and writeback bus
  val s2_wb_valid = load_s2.io.out.valid && !load_s2.io.out.bits.miss && !load_s2.io.out.bits.mmio

  // Int load, if hit, will be written back at s2
  val hitLoadOut = Wire(Valid(new ExuOutput))
  hitLoadOut.valid := s2_wb_valid
  hitLoadOut.bits.uop := load_s2.io.out.bits.uop
  hitLoadOut.bits.data := load_s2.io.out.bits.data
  hitLoadOut.bits.redirectValid := false.B
  hitLoadOut.bits.redirect := DontCare
  hitLoadOut.bits.debug.isMMIO := load_s2.io.out.bits.mmio
  hitLoadOut.bits.debug.isPerfCnt := false.B
  hitLoadOut.bits.debug.paddr := load_s2.io.out.bits.paddr
  hitLoadOut.bits.debug.vaddr := load_s2.io.out.bits.vaddr
  hitLoadOut.bits.fflags := DontCare

  load_s2.io.out.ready := true.B

  // load s3
  val s3_load_wb_meta_reg = RegNext(Mux(hitLoadOut.valid, hitLoadOut.bits, io.lsq.ldout.bits))

  // data from load queue refill
  val s3_loadDataFromLQ = RegEnable(io.lsq.ldRawData, io.lsq.ldout.valid)
  val s3_rdataLQ = s3_loadDataFromLQ.mergedData()
  val s3_rdataSelLQ = LookupTree(s3_loadDataFromLQ.addrOffset, List(
    "b000".U -> s3_rdataLQ(63, 0),
    "b001".U -> s3_rdataLQ(63, 8),
    "b010".U -> s3_rdataLQ(63, 16),
    "b011".U -> s3_rdataLQ(63, 24),
    "b100".U -> s3_rdataLQ(63, 32),
    "b101".U -> s3_rdataLQ(63, 40),
    "b110".U -> s3_rdataLQ(63, 48),
    "b111".U -> s3_rdataLQ(63, 56)
  ))
  val s3_rdataPartialLoadLQ = rdataHelper(s3_loadDataFromLQ.uop, s3_rdataSelLQ)
  // data from dcache hit
  val s3_loadDataFromDcache = RegEnable(load_s2.io.loadDataFromDcache, load_s2.io.in.valid)
  val s3_rdataDcache = s3_loadDataFromDcache.mergedData()
  val s3_rdataSelDcache = LookupTree(s3_loadDataFromDcache.addrOffset, List(
    "b000".U -> s3_rdataDcache(63, 0),
    "b001".U -> s3_rdataDcache(63, 8),
    "b010".U -> s3_rdataDcache(63, 16),
    "b011".U -> s3_rdataDcache(63, 24),
    "b100".U -> s3_rdataDcache(63, 32),
    "b101".U -> s3_rdataDcache(63, 40),
    "b110".U -> s3_rdataDcache(63, 48),
    "b111".U -> s3_rdataDcache(63, 56)
  ))
  val s3_rdataPartialLoadDcache = rdataHelper(s3_loadDataFromDcache.uop, s3_rdataSelDcache)
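  // The LookupTree above is effectively a byte shifter: addrOffset = paddr(2, 0) selects
  // where the requested data starts inside the merged 64-bit word. For example, an lw at
  // addrOffset = 4 picks s3_rdataDcache(63, 32); rdataHelper (from HasLoadHelper) then
  // sign- or zero-extends the selected bytes according to the uop's fuOpType.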
  io.ldout.bits := s3_load_wb_meta_reg
  io.ldout.bits.data := Mux(RegNext(hitLoadOut.valid), s3_rdataPartialLoadDcache, s3_rdataPartialLoadLQ)
  io.ldout.valid := RegNext(hitLoadOut.valid) && !RegNext(load_s2.io.out.bits.uop.robIdx.needFlush(io.redirect)) ||
    RegNext(io.lsq.ldout.valid) && !RegNext(io.lsq.ldout.bits.uop.robIdx.needFlush(io.redirect)) && !RegNext(hitLoadOut.valid)

  io.ldout.bits.uop.cf.exceptionVec(loadAccessFault) := s3_load_wb_meta_reg.uop.cf.exceptionVec(loadAccessFault) ||
    RegNext(hitLoadOut.valid) && load_s2.io.s3_delayed_load_error

  // fast load to load forward
  io.fastpathOut.valid := RegNext(load_s2.io.out.valid) // for debug only
  io.fastpathOut.data := s3_loadDataFromDcache.mergedData() // fastpath is for ld only

  // feedback tlb miss / dcache miss queue full
  io.feedbackSlow.bits := RegNext(load_s2.io.rsFeedback.bits)
  io.feedbackSlow.valid := RegNext(load_s2.io.rsFeedback.valid && !load_s2.io.out.bits.uop.robIdx.needFlush(io.redirect))
  // If replay is reported at load_s1, the inst will be canceled (it will not enter load_s2);
  // in that case:
  // * replay should not be reported twice
  assert(!(RegNext(io.feedbackFast.valid) && io.feedbackSlow.valid))
  // * io.fastUop.valid should not be reported
  assert(!RegNext(io.feedbackFast.valid && !io.feedbackFast.bits.hit && io.fastUop.valid))

  // load forward_fail/ldld_violation check
  // check for inst in load pipeline
  val s3_forward_fail = RegNext(io.lsq.forward.matchInvalid || io.sbuffer.matchInvalid)
  val s3_ldld_violation = RegNext(
    io.lsq.loadViolationQuery.resp.valid &&
    io.lsq.loadViolationQuery.resp.bits.have_violation &&
    RegNext(io.csrCtrl.ldld_vio_check_enable)
  )
  val s3_need_replay_from_fetch = s3_forward_fail || s3_ldld_violation
  val s3_can_replay_from_fetch = RegEnable(load_s2.io.s2_can_replay_from_fetch, load_s2.io.out.valid)
  // 1) use load pipe check result generated in load_s3 iff load_hit
  when (RegNext(hitLoadOut.valid)) {
    io.ldout.bits.uop.ctrl.replayInst := s3_need_replay_from_fetch
  }
  // 2) otherwise, write check result to load queue
  io.lsq.s3_replay_from_fetch := s3_need_replay_from_fetch && s3_can_replay_from_fetch

  // the s3_delayed_load_error path is not used for now, as we write back the load result in load_s3
  // but we keep this path for future use
  io.s3_delayed_load_error := false.B
  io.lsq.s3_delayed_load_error := false.B //load_s2.io.s3_delayed_load_error

  io.lsq.ldout.ready := !hitLoadOut.valid

  when(io.feedbackSlow.valid && !io.feedbackSlow.bits.hit) {
    // when a replay from RS is needed, the inst should not be written back to the ROB
    assert(RegNext(!hitLoadOut.valid))
    assert(RegNext(!io.lsq.loadIn.valid) || RegNext(load_s2.io.s2_dcache_require_replay))
  }

  val lastValidData = RegEnable(io.ldout.bits.data, io.ldout.fire)
  val hitLoadAddrTriggerHitVec = Wire(Vec(3, Bool()))
  val lqLoadAddrTriggerHitVec = io.lsq.trigger.lqLoadAddrTriggerHitVec
  (0 until 3).map{i => {
    val tdata2 = io.trigger(i).tdata2
    val matchType = io.trigger(i).matchType
    val tEnable = io.trigger(i).tEnable

    hitLoadAddrTriggerHitVec(i) := TriggerCmp(load_s2.io.out.bits.vaddr, tdata2, matchType, tEnable)
    io.trigger(i).addrHit := Mux(hitLoadOut.valid, hitLoadAddrTriggerHitVec(i), lqLoadAddrTriggerHitVec(i))
    io.trigger(i).lastDataHit := TriggerCmp(lastValidData, tdata2, matchType, tEnable)
  }}
  io.lsq.trigger.hitLoadAddrTriggerHitVec := hitLoadAddrTriggerHitVec

  val perfEvents = Seq(
    ("load_s0_in_fire         ", load_s0.io.in.fire),
    ("load_to_load_forward    ", load_s1.io.out.valid && s1_tryPointerChasing && !cancelPointerChasing),
    ("stall_dcache            ", load_s0.io.out.valid && load_s0.io.out.ready && !load_s0.io.dcacheReq.ready),
    ("load_s1_in_fire         ", load_s1.io.in.fire),
    ("load_s1_tlb_miss        ", load_s1.io.in.fire && load_s1.io.dtlbResp.bits.miss),
    ("load_s2_in_fire         ", load_s2.io.in.fire),
    ("load_s2_dcache_miss     ", load_s2.io.in.fire && load_s2.io.dcacheResp.bits.miss),
    ("load_s2_replay          ", load_s2.io.rsFeedback.valid && !load_s2.io.rsFeedback.bits.hit),
    ("load_s2_replay_tlb_miss ", load_s2.io.rsFeedback.valid && !load_s2.io.rsFeedback.bits.hit && load_s2.io.in.bits.tlbMiss),
    ("load_s2_replay_cache    ", load_s2.io.rsFeedback.valid && !load_s2.io.rsFeedback.bits.hit && !load_s2.io.in.bits.tlbMiss && load_s2.io.dcacheResp.bits.miss),
  )
  generatePerfEvent()

  when(io.ldout.fire){
    XSDebug("ldout %x\n", io.ldout.bits.uop.cf.pc)
  }
}