1/*************************************************************************************** 2* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences 3* Copyright (c) 2020-2021 Peng Cheng Laboratory 4* 5* XiangShan is licensed under Mulan PSL v2. 6* You can use this software according to the terms and conditions of the Mulan PSL v2. 7* You may obtain a copy of Mulan PSL v2 at: 8* http://license.coscl.org.cn/MulanPSL2 9* 10* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, 11* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, 12* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. 13* 14* See the Mulan PSL v2 for more details. 15***************************************************************************************/ 16 17package xiangshan.mem 18 19import org.chipsalliance.cde.config.Parameters 20import chisel3._ 21import chisel3.util._ 22import utils._ 23import utility._ 24import xiangshan.ExceptionNO._ 25import xiangshan._ 26import xiangshan.backend.Bundles.{MemExuInput, MemExuOutput} 27import xiangshan.backend.fu.PMPRespBundle 28import xiangshan.backend.fu.FuConfig._ 29import xiangshan.backend.fu.FuType._ 30import xiangshan.backend.ctrlblock.DebugLsInfoBundle 31import xiangshan.cache.mmu.{TlbCmd, TlbReq, TlbRequestIO, TlbResp} 32import xiangshan.cache.{DcacheStoreRequestIO, DCacheStoreIO, MemoryOpConstants, HasDCacheParameters, StorePrefetchReq} 33 34class StoreUnit(implicit p: Parameters) extends XSModule 35 with HasDCacheParameters 36 with HasVLSUParameters 37 { 38 val io = IO(new Bundle() { 39 val redirect = Flipped(ValidIO(new Redirect)) 40 val stin = Flipped(Decoupled(new MemExuInput)) 41 val issue = Valid(new MemExuInput) 42 val tlb = new TlbRequestIO() 43 val dcache = new DCacheStoreIO 44 val pmp = Flipped(new PMPRespBundle()) 45 val lsq = ValidIO(new LsPipelineBundle) 46 val lsq_replenish = Output(new LsPipelineBundle()) 47 val feedback_slow = ValidIO(new RSFeedback) 48 val prefetch_req = Flipped(DecoupledIO(new StorePrefetchReq)) 49 // provide prefetch info to sms 50 val prefetch_train = ValidIO(new StPrefetchTrainBundle()) 51 val stld_nuke_query = Valid(new StoreNukeQueryIO) 52 val stout = DecoupledIO(new MemExuOutput) // writeback store 53 val vecstout = DecoupledIO(new VecPipelineFeedbackIO(isVStore = true)) 54 // store mask, send to sq in store_s0 55 val st_mask_out = Valid(new StoreMaskBundle) 56 val debug_ls = Output(new DebugLsInfoBundle) 57 // vector 58 val vecstin = Flipped(Decoupled(new VecPipeBundle(isVStore = true))) 59 val vec_isFirstIssue = Input(Bool()) 60 }) 61 62 val s1_ready, s2_ready, s3_ready = WireInit(false.B) 63 64 // Pipeline 65 // -------------------------------------------------------------------------------- 66 // stage 0 67 // -------------------------------------------------------------------------------- 68 // generate addr, use addr to query DCache and DTLB 69 val s0_iss_valid = io.stin.valid 70 val s0_prf_valid = io.prefetch_req.valid && io.dcache.req.ready 71 val s0_vec_valid = io.vecstin.valid 72 val s0_valid = s0_iss_valid || s0_prf_valid || s0_vec_valid 73 val s0_use_flow_vec = s0_vec_valid 74 val s0_use_flow_rs = s0_iss_valid && !s0_vec_valid 75 val s0_use_flow_prf = !s0_iss_valid && !s0_vec_valid && s0_prf_valid 76 val s0_stin = Mux(s0_use_flow_rs, io.stin.bits, 0.U.asTypeOf(io.stin.bits)) 77 val s0_vecstin = Mux(s0_use_flow_vec, io.vecstin.bits, 0.U.asTypeOf(io.vecstin.bits)) 78 val s0_uop = Mux(s0_use_flow_rs, s0_stin.uop, s0_vecstin.uop) 79 val s0_isFirstIssue = s0_use_flow_rs && io.stin.bits.isFirstIssue || s0_use_flow_vec && io.vec_isFirstIssue 80 val s0_rsIdx = Mux(s0_use_flow_rs, io.stin.bits.iqIdx, 0.U) 81 val s0_size = Mux(s0_use_flow_rs || s0_use_flow_vec, s0_uop.fuOpType(2,0), 0.U)// may broken if use it in feature 82 val s0_mem_idx = Mux(s0_use_flow_rs || s0_use_flow_vec, s0_uop.sqIdx.value, 0.U) 83 val s0_rob_idx = Mux(s0_use_flow_rs || s0_use_flow_vec, s0_uop.robIdx, 0.U.asTypeOf(s0_uop.robIdx)) 84 val s0_pc = Mux(s0_use_flow_rs || s0_use_flow_vec, s0_uop.pc, 0.U) 85 val s0_instr_type = Mux(s0_use_flow_rs || s0_use_flow_vec, STORE_SOURCE.U, DCACHE_PREFETCH_SOURCE.U) 86 val s0_wlineflag = Mux(s0_use_flow_rs, s0_uop.fuOpType === LSUOpType.cbo_zero, false.B) 87 val s0_out = Wire(new LsPipelineBundle) 88 val s0_kill = s0_uop.robIdx.needFlush(io.redirect) 89 val s0_can_go = s1_ready 90 val s0_fire = s0_valid && !s0_kill && s0_can_go 91 val s0_is128bit = is128Bit(s0_vecstin.alignedType) 92 // vector 93 val s0_vecActive = !s0_use_flow_vec || s0_vecstin.vecActive 94 // val s0_flowPtr = s0_vecstin.flowPtr 95 // val s0_isLastElem = s0_vecstin.isLastElem 96 val s0_secondInv = s0_vecstin.usSecondInv 97 val s0_elemIdx = s0_vecstin.elemIdx 98 val s0_alignedType = s0_vecstin.alignedType 99 val s0_mBIndex = s0_vecstin.mBIndex 100 101 // generate addr 102 // val saddr = s0_in.bits.src(0) + SignExt(s0_in.bits.uop.imm(11,0), VAddrBits) 103 val imm12 = WireInit(s0_uop.imm(11,0)) 104 val saddr_lo = s0_stin.src(0)(11,0) + Cat(0.U(1.W), imm12) 105 val saddr_hi = Mux(saddr_lo(12), 106 Mux(imm12(11), s0_stin.src(0)(VAddrBits-1, 12), s0_stin.src(0)(VAddrBits-1, 12)+1.U), 107 Mux(imm12(11), s0_stin.src(0)(VAddrBits-1, 12)+SignExt(1.U, VAddrBits-12), s0_stin.src(0)(VAddrBits-1, 12)), 108 ) 109 val s0_saddr = Cat(saddr_hi, saddr_lo(11,0)) 110 val s0_vaddr = Mux( 111 s0_use_flow_rs, 112 s0_saddr, 113 Mux( 114 s0_use_flow_vec, 115 s0_vecstin.vaddr, 116 io.prefetch_req.bits.vaddr 117 ) 118 ) 119 val s0_mask = Mux( 120 s0_use_flow_rs, 121 genVWmask128(s0_saddr, s0_uop.fuOpType(2,0)), 122 Mux( 123 s0_use_flow_vec, 124 s0_vecstin.mask, 125 // -1.asSInt.asUInt 126 Fill(VLEN/8, 1.U(1.W)) 127 ) 128 ) 129 130 io.tlb.req.valid := s0_valid 131 io.tlb.req.bits.vaddr := s0_vaddr 132 io.tlb.req.bits.cmd := TlbCmd.write 133 io.tlb.req.bits.size := s0_size 134 io.tlb.req.bits.kill := false.B 135 io.tlb.req.bits.memidx.is_ld := false.B 136 io.tlb.req.bits.memidx.is_st := true.B 137 io.tlb.req.bits.memidx.idx := s0_mem_idx 138 io.tlb.req.bits.debug.robIdx := s0_rob_idx 139 io.tlb.req.bits.no_translate := false.B 140 io.tlb.req.bits.debug.pc := s0_pc 141 io.tlb.req.bits.debug.isFirstIssue := s0_isFirstIssue 142 io.tlb.req_kill := false.B 143 io.tlb.req.bits.hyperinst := LSUOpType.isHsv(s0_uop.fuOpType) 144 io.tlb.req.bits.hlvx := false.B 145 146 // Dcache access here: not **real** dcache write 147 // just read meta and tag in dcache, to find out the store will hit or miss 148 149 // NOTE: The store request does not wait for the dcache to be ready. 150 // If the dcache is not ready at this time, the dcache is not queried. 151 // But, store prefetch request will always wait for dcache to be ready to make progress. 152 io.dcache.req.valid := s0_fire 153 io.dcache.req.bits.cmd := MemoryOpConstants.M_PFW 154 io.dcache.req.bits.vaddr := s0_vaddr 155 io.dcache.req.bits.instrtype := s0_instr_type 156 157 s0_out := DontCare 158 s0_out.vaddr := s0_vaddr 159 // Now data use its own io 160 // s1_out.data := genWdata(s1_in.src(1), s1_in.uop.fuOpType(1,0)) 161 s0_out.data := s0_stin.src(1) 162 s0_out.uop := s0_uop 163 s0_out.miss := false.B 164 s0_out.rsIdx := s0_rsIdx 165 s0_out.mask := s0_mask 166 s0_out.isFirstIssue := s0_isFirstIssue 167 s0_out.isHWPrefetch := s0_use_flow_prf 168 s0_out.wlineflag := s0_wlineflag 169 s0_out.isvec := s0_use_flow_vec 170 s0_out.is128bit := s0_is128bit 171 s0_out.vecActive := s0_vecActive 172 s0_out.usSecondInv := s0_secondInv 173 s0_out.elemIdx := s0_elemIdx 174 s0_out.alignedType := s0_alignedType 175 s0_out.mbIndex := s0_mBIndex 176 when(s0_valid && s0_isFirstIssue) { 177 s0_out.uop.debugInfo.tlbFirstReqTime := GTimer() 178 } 179 180 // exception check 181 val s0_addr_aligned = LookupTree(Mux(s0_use_flow_vec, s0_vecstin.alignedType(1,0), s0_uop.fuOpType(1, 0)), List( 182 "b00".U -> true.B, //b 183 "b01".U -> (s0_out.vaddr(0) === 0.U), //h 184 "b10".U -> (s0_out.vaddr(1,0) === 0.U), //w 185 "b11".U -> (s0_out.vaddr(2,0) === 0.U) //d 186 )) 187 // if vector store sends 128-bit requests, its address must be 128-aligned 188 XSError(s0_use_flow_vec && s0_out.vaddr(3, 0) =/= 0.U && s0_vecstin.alignedType(2), "unit stride 128 bit element is not aligned!") 189 s0_out.uop.exceptionVec(storeAddrMisaligned) := Mux(s0_use_flow_rs || s0_use_flow_vec, !s0_addr_aligned, false.B) 190 191 io.st_mask_out.valid := s0_use_flow_rs || s0_use_flow_vec 192 io.st_mask_out.bits.mask := s0_out.mask 193 io.st_mask_out.bits.sqIdx := s0_out.uop.sqIdx 194 195 io.stin.ready := s1_ready && s0_use_flow_rs 196 io.vecstin.ready := s1_ready && s0_use_flow_vec 197 io.prefetch_req.ready := s1_ready && io.dcache.req.ready && !s0_iss_valid && !s0_vec_valid 198 199 // Pipeline 200 // -------------------------------------------------------------------------------- 201 // stage 1 202 // -------------------------------------------------------------------------------- 203 // TLB resp (send paddr to dcache) 204 val s1_valid = RegInit(false.B) 205 val s1_in = RegEnable(s0_out, s0_fire) 206 val s1_out = Wire(new LsPipelineBundle) 207 val s1_kill = Wire(Bool()) 208 val s1_can_go = s2_ready 209 val s1_fire = s1_valid && !s1_kill && s1_can_go 210 val s1_vecActive = RegEnable(s0_out.vecActive, true.B, s0_fire) 211 212 // mmio cbo decoder 213 val s1_mmio_cbo = s1_in.uop.fuOpType === LSUOpType.cbo_clean || 214 s1_in.uop.fuOpType === LSUOpType.cbo_flush || 215 s1_in.uop.fuOpType === LSUOpType.cbo_inval 216 val s1_paddr = io.tlb.resp.bits.paddr(0) 217 val s1_gpaddr = io.tlb.resp.bits.gpaddr(0) 218 val s1_tlb_miss = io.tlb.resp.bits.miss 219 val s1_mmio = s1_mmio_cbo 220 val s1_exception = ExceptionNO.selectByFu(s1_out.uop.exceptionVec, StaCfg).asUInt.orR 221 val s1_isvec = RegEnable(s0_out.isvec, false.B, s0_fire) 222 // val s1_isLastElem = RegEnable(s0_isLastElem, false.B, s0_fire) 223 s1_kill := s1_in.uop.robIdx.needFlush(io.redirect) || (s1_tlb_miss && !s1_isvec) 224 225 s1_ready := !s1_valid || s1_kill || s2_ready 226 io.tlb.resp.ready := true.B // TODO: why dtlbResp needs a ready? 227 when (s0_fire) { s1_valid := true.B } 228 .elsewhen (s1_fire) { s1_valid := false.B } 229 .elsewhen (s1_kill) { s1_valid := false.B } 230 231 // st-ld violation dectect request. 232 io.stld_nuke_query.valid := s1_valid && !s1_tlb_miss && !s1_in.isHWPrefetch 233 io.stld_nuke_query.bits.robIdx := s1_in.uop.robIdx 234 io.stld_nuke_query.bits.paddr := s1_paddr 235 io.stld_nuke_query.bits.mask := s1_in.mask 236 io.stld_nuke_query.bits.matchLine := s1_in.isvec && s1_in.is128bit 237 238 // issue 239 io.issue.valid := s1_valid && !s1_tlb_miss && !s1_in.isHWPrefetch && !s1_isvec 240 io.issue.bits := RegEnable(s0_stin, s0_valid) 241 242 243 // Send TLB feedback to store issue queue 244 // Store feedback is generated in store_s1, sent to RS in store_s2 245 val s1_feedback = Wire(Valid(new RSFeedback)) 246 s1_feedback.valid := s1_valid & !s1_in.isHWPrefetch 247 s1_feedback.bits.hit := !s1_tlb_miss 248 s1_feedback.bits.flushState := io.tlb.resp.bits.ptwBack 249 s1_feedback.bits.robIdx := s1_out.uop.robIdx 250 s1_feedback.bits.sourceType := RSFeedbackType.tlbMiss 251 s1_feedback.bits.dataInvalidSqIdx := DontCare 252 253 XSDebug(s1_feedback.valid, 254 "S1 Store: tlbHit: %d robIdx: %d\n", 255 s1_feedback.bits.hit, 256 s1_feedback.bits.robIdx.value 257 ) 258 259 // io.feedback_slow := s1_feedback 260 261 // get paddr from dtlb, check if rollback is needed 262 // writeback store inst to lsq 263 s1_out := s1_in 264 s1_out.paddr := s1_paddr 265 s1_out.gpaddr := s1_gpaddr 266 s1_out.miss := false.B 267 s1_out.mmio := s1_mmio 268 s1_out.tlbMiss := s1_tlb_miss 269 s1_out.atomic := s1_mmio 270 s1_out.uop.exceptionVec(storePageFault) := io.tlb.resp.bits.excp(0).pf.st && s1_vecActive 271 s1_out.uop.exceptionVec(storeAccessFault) := io.tlb.resp.bits.excp(0).af.st && s1_vecActive 272 s1_out.uop.exceptionVec(storeGuestPageFault) := io.tlb.resp.bits.excp(0).gpf.st && s1_vecActive 273 274 // scalar store and scalar load nuke check, and also other purposes 275 io.lsq.valid := s1_valid && !s1_in.isHWPrefetch 276 io.lsq.bits := s1_out 277 io.lsq.bits.miss := s1_tlb_miss 278 279 // kill dcache write intent request when tlb miss or exception 280 io.dcache.s1_kill := (s1_tlb_miss || s1_exception || s1_mmio || s1_in.uop.robIdx.needFlush(io.redirect)) 281 io.dcache.s1_paddr := s1_paddr 282 283 // write below io.out.bits assign sentence to prevent overwriting values 284 val s1_tlb_memidx = io.tlb.resp.bits.memidx 285 when(s1_tlb_memidx.is_st && io.tlb.resp.valid && !s1_tlb_miss && s1_tlb_memidx.idx === s1_out.uop.sqIdx.value) { 286 // printf("Store idx = %d\n", s1_tlb_memidx.idx) 287 s1_out.uop.debugInfo.tlbRespTime := GTimer() 288 } 289 290 // Pipeline 291 // -------------------------------------------------------------------------------- 292 // stage 2 293 // -------------------------------------------------------------------------------- 294 // mmio check 295 val s2_valid = RegInit(false.B) 296 val s2_in = RegEnable(s1_out, s1_fire) 297 val s2_out = Wire(new LsPipelineBundle) 298 val s2_kill = Wire(Bool()) 299 val s2_can_go = s3_ready 300 val s2_fire = s2_valid && !s2_kill && s2_can_go 301 val s2_vecActive = RegEnable(s1_out.vecActive, true.B, s1_fire) 302 303 s2_ready := !s2_valid || s2_kill || s3_ready 304 when (s1_fire) { s2_valid := true.B } 305 .elsewhen (s2_fire) { s2_valid := false.B } 306 .elsewhen (s2_kill) { s2_valid := false.B } 307 308 val s2_pmp = WireInit(io.pmp) 309 310 val s2_exception = (ExceptionNO.selectByFu(s2_out.uop.exceptionVec, StaCfg).asUInt.orR) && RegNext(s1_feedback.bits.hit) 311 val s2_mmio = (s2_in.mmio || s2_pmp.mmio) && RegNext(s1_feedback.bits.hit) 312 s2_kill := ((s2_mmio && !s2_exception) && !s2_in.isvec) || s2_in.uop.robIdx.needFlush(io.redirect) 313 314 s2_out := s2_in 315 s2_out.af := s2_pmp.st && !s2_in.isvec 316 s2_out.mmio := s2_mmio && !s2_exception 317 s2_out.atomic := s2_in.atomic || s2_pmp.atomic 318 s2_out.uop.exceptionVec(storeAccessFault) := (s2_in.uop.exceptionVec(storeAccessFault) || s2_pmp.st) && s2_vecActive 319 320 // kill dcache write intent request when mmio or exception 321 io.dcache.s2_kill := (s2_mmio || s2_exception || s2_in.uop.robIdx.needFlush(io.redirect)) 322 io.dcache.s2_pc := s2_out.uop.pc 323 // TODO: dcache resp 324 io.dcache.resp.ready := true.B 325 326 // feedback tlb miss to RS in store_s2 327 io.feedback_slow.valid := RegNext(s1_feedback.valid && !s1_out.uop.robIdx.needFlush(io.redirect)) && !RegNext(s1_out.isvec) 328 io.feedback_slow.bits := RegNext(s1_feedback.bits) 329 330 val s2_vecFeedback = RegNext(!s1_out.uop.robIdx.needFlush(io.redirect) && s1_feedback.bits.hit) && s2_in.isvec 331 332 // mmio and exception 333 io.lsq_replenish := s2_out 334 335 // prefetch related 336 io.lsq_replenish.miss := io.dcache.resp.fire && io.dcache.resp.bits.miss // miss info 337 338 // RegNext prefetch train for better timing 339 // ** Now, prefetch train is valid at store s3 ** 340 io.prefetch_train.bits.fromLsPipelineBundle(s2_in, latch = true) 341 // override miss bit 342 io.prefetch_train.bits.miss := RegNext(io.dcache.resp.bits.miss) 343 // TODO: add prefetch and access bit 344 io.prefetch_train.bits.meta_prefetch := false.B 345 io.prefetch_train.bits.meta_access := false.B 346 if(EnableStorePrefetchSMS) { 347 io.prefetch_train.valid := RegNext(s2_valid && io.dcache.resp.fire && !s2_out.mmio && !s2_in.tlbMiss && !s2_in.isHWPrefetch) 348 }else { 349 io.prefetch_train.valid := false.B 350 } 351 352 // Pipeline 353 // -------------------------------------------------------------------------------- 354 // stage 3 355 // -------------------------------------------------------------------------------- 356 // store write back 357 val s3_valid = RegInit(false.B) 358 val s3_in = RegEnable(s2_out, s2_fire) 359 val s3_out = Wire(new MemExuOutput(isVector = true)) 360 val s3_kill = s3_in.uop.robIdx.needFlush(io.redirect) 361 val s3_can_go = s3_ready 362 val s3_fire = s3_valid && !s3_kill && s3_can_go 363 val s3_vecFeedback = RegEnable(s2_vecFeedback, s2_fire) 364 365 when (s2_fire) { s3_valid := (!s2_mmio || s2_exception) && !s2_out.isHWPrefetch } 366 .elsewhen (s3_fire) { s3_valid := false.B } 367 .elsewhen (s3_kill) { s3_valid := false.B } 368 369 // wb: writeback 370 val SelectGroupSize = RollbackGroupSize 371 val lgSelectGroupSize = log2Ceil(SelectGroupSize) 372 val TotalSelectCycles = scala.math.ceil(log2Ceil(LoadQueueRAWSize).toFloat / lgSelectGroupSize).toInt + 1 373 374 s3_out := DontCare 375 s3_out.uop := s3_in.uop 376 s3_out.data := DontCare 377 s3_out.debug.isMMIO := s3_in.mmio 378 s3_out.debug.paddr := s3_in.paddr 379 s3_out.debug.vaddr := s3_in.vaddr 380 s3_out.debug.isPerfCnt := false.B 381 382 // Pipeline 383 // -------------------------------------------------------------------------------- 384 // stage x 385 // -------------------------------------------------------------------------------- 386 // delay TotalSelectCycles - 2 cycle(s) 387 val TotalDelayCycles = TotalSelectCycles - 2 388 val sx_valid = Wire(Vec(TotalDelayCycles + 1, Bool())) 389 val sx_ready = Wire(Vec(TotalDelayCycles + 1, Bool())) 390 val sx_in = Wire(Vec(TotalDelayCycles + 1, new VecMemExuOutput(isVector = true))) 391 392 // backward ready signal 393 s3_ready := sx_ready.head 394 for (i <- 0 until TotalDelayCycles + 1) { 395 if (i == 0) { 396 sx_valid(i) := s3_valid 397 sx_in(i).output := s3_out 398 sx_in(i).vecFeedback := s3_vecFeedback 399 sx_in(i).mmio := s3_in.mmio 400 sx_in(i).usSecondInv := s3_in.usSecondInv 401 sx_in(i).elemIdx := s3_in.elemIdx 402 sx_in(i).alignedType := s3_in.alignedType 403 sx_in(i).mbIndex := s3_in.mbIndex 404 sx_in(i).mask := s3_in.mask 405 sx_in(i).vaddr := s3_in.vaddr 406 sx_ready(i) := !s3_valid(i) || sx_in(i).output.uop.robIdx.needFlush(io.redirect) || (if (TotalDelayCycles == 0) io.stout.ready else sx_ready(i+1)) 407 } else { 408 val cur_kill = sx_in(i).output.uop.robIdx.needFlush(io.redirect) 409 val cur_can_go = (if (i == TotalDelayCycles) io.stout.ready else sx_ready(i+1)) 410 val cur_fire = sx_valid(i) && !cur_kill && cur_can_go 411 val prev_fire = sx_valid(i-1) && !sx_in(i-1).output.uop.robIdx.needFlush(io.redirect) && sx_ready(i) 412 413 sx_ready(i) := !sx_valid(i) || cur_kill || (if (i == TotalDelayCycles) io.stout.ready else sx_ready(i+1)) 414 val sx_valid_can_go = prev_fire || cur_fire || cur_kill 415 sx_valid(i) := RegEnable(Mux(prev_fire, true.B, false.B), false.B, sx_valid_can_go) 416 sx_in(i) := RegEnable(sx_in(i-1), prev_fire) 417 } 418 } 419 val sx_last_valid = sx_valid.takeRight(1).head 420 val sx_last_ready = sx_ready.takeRight(1).head 421 val sx_last_in = sx_in.takeRight(1).head 422 sx_last_ready := !sx_last_valid || sx_last_in.output.uop.robIdx.needFlush(io.redirect) || io.stout.ready 423 424 io.stout.valid := sx_last_valid && !sx_last_in.output.uop.robIdx.needFlush(io.redirect) && isStore(sx_last_in.output.uop.fuType) 425 io.stout.bits := sx_last_in.output 426 427 io.vecstout.valid := sx_last_valid && !sx_last_in.output.uop.robIdx.needFlush(io.redirect) && isVStore(sx_last_in.output.uop.fuType) 428 // TODO: implement it! 429 io.vecstout.bits.mBIndex := sx_last_in.mbIndex 430 io.vecstout.bits.hit := sx_last_in.vecFeedback 431 io.vecstout.bits.isvec := true.B 432 io.vecstout.bits.sourceType := RSFeedbackType.tlbMiss 433 io.vecstout.bits.flushState := DontCare 434 io.vecstout.bits.mmio := sx_last_in.mmio 435 io.vecstout.bits.exceptionVec := sx_last_in.output.uop.exceptionVec 436 io.vecstout.bits.usSecondInv := sx_last_in.usSecondInv 437 io.vecstout.bits.vecFeedback := sx_last_in.vecFeedback 438 io.vecstout.bits.elemIdx := sx_last_in.elemIdx 439 io.vecstout.bits.alignedType := sx_last_in.alignedType 440 io.vecstout.bits.mask := sx_last_in.mask 441 io.vecstout.bits.vaddr := sx_last_in.vaddr 442 // io.vecstout.bits.reg_offset.map(_ := DontCare) 443 // io.vecstout.bits.elemIdx.map(_ := sx_last_in.elemIdx) 444 // io.vecstout.bits.elemIdxInsideVd.map(_ := DontCare) 445 // io.vecstout.bits.vecdata.map(_ := DontCare) 446 // io.vecstout.bits.mask.map(_ := DontCare) 447 // io.vecstout.bits.alignedType.map(_ := sx_last_in.alignedType) 448 449 io.debug_ls := DontCare 450 io.debug_ls.s1_robIdx := s1_in.uop.robIdx.value 451 io.debug_ls.s1_isTlbFirstMiss := io.tlb.resp.valid && io.tlb.resp.bits.miss && io.tlb.resp.bits.debug.isFirstIssue && !s1_in.isHWPrefetch 452 453 private def printPipeLine(pipeline: LsPipelineBundle, cond: Bool, name: String): Unit = { 454 XSDebug(cond, 455 p"$name" + p" pc ${Hexadecimal(pipeline.uop.pc)} " + 456 p"addr ${Hexadecimal(pipeline.vaddr)} -> ${Hexadecimal(pipeline.paddr)} " + 457 p"op ${Binary(pipeline.uop.fuOpType)} " + 458 p"data ${Hexadecimal(pipeline.data)} " + 459 p"mask ${Hexadecimal(pipeline.mask)}\n" 460 ) 461 } 462 463 printPipeLine(s0_out, s0_valid, "S0") 464 printPipeLine(s1_out, s1_valid, "S1") 465 466 // perf cnt 467 XSPerfAccumulate("s0_in_valid", s0_valid) 468 XSPerfAccumulate("s0_in_fire", s0_fire) 469 XSPerfAccumulate("s0_vecin_fire", s0_fire && s0_use_flow_vec) 470 XSPerfAccumulate("s0_in_fire_first_issue", s0_fire && s0_isFirstIssue) 471 XSPerfAccumulate("s0_addr_spec_success", s0_fire && !s0_use_flow_vec && s0_saddr(VAddrBits-1, 12) === s0_stin.src(0)(VAddrBits-1, 12)) 472 XSPerfAccumulate("s0_addr_spec_failed", s0_fire && !s0_use_flow_vec && s0_saddr(VAddrBits-1, 12) =/= s0_stin.src(0)(VAddrBits-1, 12)) 473 XSPerfAccumulate("s0_addr_spec_success_once", s0_fire && !s0_use_flow_vec && s0_saddr(VAddrBits-1, 12) === s0_stin.src(0)(VAddrBits-1, 12) && s0_isFirstIssue) 474 XSPerfAccumulate("s0_addr_spec_failed_once", s0_fire && !s0_use_flow_vec && s0_saddr(VAddrBits-1, 12) =/= s0_stin.src(0)(VAddrBits-1, 12) && s0_isFirstIssue) 475 476 XSPerfAccumulate("s1_in_valid", s1_valid) 477 XSPerfAccumulate("s1_in_fire", s1_fire) 478 XSPerfAccumulate("s1_in_fire_first_issue", s1_fire && s1_in.isFirstIssue) 479 XSPerfAccumulate("s1_tlb_miss", s1_fire && s1_tlb_miss) 480 XSPerfAccumulate("s1_tlb_miss_first_issue", s1_fire && s1_tlb_miss && s1_in.isFirstIssue) 481 // end 482}