/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.frontend.icache

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import difftest._
import freechips.rocketchip.tilelink.ClientStates
import xiangshan._
import xiangshan.cache.mmu._
import utils._
import utility._
import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle}
import xiangshan.frontend.{FtqICacheInfo, FtqToICacheRequestBundle}

class ICacheMainPipeReq(implicit p: Parameters) extends ICacheBundle
{
  val vaddr  = UInt(VAddrBits.W)
  def vsetIdx = get_idx(vaddr)
}

class ICacheMainPipeResp(implicit p: Parameters) extends ICacheBundle
{
  val vaddr        = UInt(VAddrBits.W)
  val registerData = UInt(blockBits.W)
  val sramData     = UInt(blockBits.W)
  val select       = Bool()
  val paddr        = UInt(PAddrBits.W)
  val tlbExcp      = new Bundle{
    val pageFault   = Bool()
    val accessFault = Bool()
    val mmio        = Bool()
  }
}

class ICacheMainPipeBundle(implicit p: Parameters) extends ICacheBundle
{
  val req               = Flipped(Decoupled(new FtqToICacheRequestBundle))
  val resp              = Vec(PortNumber, ValidIO(new ICacheMainPipeResp))
  val topdownIcacheMiss = Output(Bool())
  val topdownItlbMiss   = Output(Bool())
}

class ICacheMetaReqBundle(implicit p: Parameters) extends ICacheBundle{
  val toIMeta   = DecoupledIO(new ICacheReadBundle)
  val fromIMeta = Input(new ICacheMetaRespBundle)
}

class ICacheDataReqBundle(implicit p: Parameters) extends ICacheBundle{
  val toIData   = DecoupledIO(Vec(partWayNum, new ICacheReadBundle))
  val fromIData = Input(new ICacheDataRespBundle)
}

class ICacheMSHRBundle(implicit p: Parameters) extends ICacheBundle{
  val toMSHR   = Decoupled(new ICacheMissReq)
  val fromMSHR = Flipped(ValidIO(new ICacheMissResp))
}

class ICachePMPBundle(implicit p: Parameters) extends ICacheBundle{
  val req  = Valid(new PMPReqBundle())
  val resp = Input(new PMPRespBundle())
}

class ICachePerfInfo(implicit p: Parameters) extends ICacheBundle{
  val only_0_hit      = Bool()
  val only_0_miss     = Bool()
  val hit_0_hit_1     = Bool()
  val hit_0_miss_1    = Bool()
  val miss_0_hit_1    = Bool()
  val miss_0_miss_1   = Bool()
  val hit_0_except_1  = Bool()
  val miss_0_except_1 = Bool()
  val except_0        = Bool()
  val bank_hit        = Vec(2, Bool())
  val hit             = Bool()
}

class ICacheMainPipeInterface(implicit p: Parameters) extends ICacheBundle {
  val hartId = Input(UInt(8.W))
  /*** internal interface ***/
  val metaArray = new ICacheMetaReqBundle
  val dataArray = new ICacheDataReqBundle
  /** prefetch io */
  val IPFBufferRead = Flipped(new IPFBufferRead)
  val PIQRead       = Flipped(new PIQRead)

  val IPFReplacer        = Flipped(new IPFReplacer)
  val ICacheMainPipeInfo = new ICacheMainPipeInfo
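  // The four ports above form the prefetcher-facing interface: IPFBufferRead/PIQRead
  // look up prefetched cachelines, IPFReplacer lets the prefetch buffer query a victim
  // way, and ICacheMainPipeInfo exports s1/s2 request info plus miss-slot status so
  // that IPrefetchPipe can filter out requests the main pipe is already handling.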
  val mshr   = Vec(PortNumber, new ICacheMSHRBundle)
  val errors = Output(Vec(PortNumber, new L1CacheErrorInfo))
  /*** outside interface ***/
  //val fetch       = Vec(PortNumber, new ICacheMainPipeBundle)
  /* when ftq.valid is high in T + 1 cycle
   * the ftq component must be valid in T cycle
   */
  val fetch     = new ICacheMainPipeBundle
  val pmp       = Vec(PortNumber, new ICachePMPBundle)
  val itlb      = Vec(PortNumber, new TlbRequestIO)
  val respStall = Input(Bool())
  val perfInfo  = Output(new ICachePerfInfo)

  val csr_parity_enable = Input(Bool())
}

class ICacheMainPipe(implicit p: Parameters) extends ICacheModule
{
  val io = IO(new ICacheMainPipeInterface)

  /** Input/Output port */
  val (fromFtq, toIFU)   = (io.fetch.req, io.fetch.resp)
  val (toMeta, metaResp) = (io.metaArray.toIMeta, io.metaArray.fromIMeta)
  val (toData, dataResp) = (io.dataArray.toIData, io.dataArray.fromIData)
  val (toIPF,  fromIPF)  = (io.IPFBufferRead.req, io.IPFBufferRead.resp)
  val (toPIQ,  fromPIQ)  = (io.PIQRead.req, io.PIQRead.resp)
  val (toMSHR, fromMSHR) = (io.mshr.map(_.toMSHR), io.mshr.map(_.fromMSHR))
  val (toITLB, fromITLB) = (io.itlb.map(_.req), io.itlb.map(_.resp))
  val (toPMP,  fromPMP)  = (io.pmp.map(_.req), io.pmp.map(_.resp))

  val IPFReplacer = io.IPFReplacer
  val toIPrefetch = io.ICacheMainPipeInfo


  // Statistics on the frequency distribution of FTQ fire interval
  val cntFtqFireInterval = RegInit(0.U(32.W))
  cntFtqFireInterval := Mux(fromFtq.fire, 1.U, cntFtqFireInterval + 1.U)
  XSPerfHistogram("ftq2icache_fire_" + p(XSCoreParamsKey).HartId.toString,
                  cntFtqFireInterval, fromFtq.fire,
                  1, 300, 1, right_strict = true)

  // Ftq RegNext Register
  val fromFtqReq = fromFtq.bits.pcMemRead

  /** pipeline control signal */
  val s1_ready, s2_ready = Wire(Bool())
  val s0_fire, s1_fire, s2_fire = Wire(Bool())

  val missSwitchBit = RegInit(false.B)

  /** replacement status register */
  val touch_sets = Seq.fill(2)(Wire(Vec(2, UInt(log2Ceil(nSets/2).W))))
  val touch_ways = Seq.fill(2)(Wire(Vec(2, Valid(UInt(log2Ceil(nWays).W)))))

  /**
    ******************************************************************************
    * ICache Stage 0
    * - send req to ITLB and wait for tlb miss fixing
    * - send req to Meta/Data SRAM
    ******************************************************************************
    */

  /** s0 control */
  val s0_valid       = fromFtq.valid
  val s0_req_vaddr   = (0 until partWayNum + 1).map(i => VecInit(Seq(fromFtqReq(i).startAddr, fromFtqReq(i).nextlineStart)))
  val s0_req_vsetIdx = (0 until partWayNum + 1).map(i => VecInit(s0_req_vaddr(i).map(get_idx(_))))
  val s0_only_first  = (0 until partWayNum + 1).map(i => fromFtq.bits.readValid(i) && !fromFtqReq(i).crossCacheline)
  val s0_double_line = (0 until partWayNum + 1).map(i => fromFtq.bits.readValid(i) &&  fromFtqReq(i).crossCacheline)

  val s0_final_valid       = s0_valid
  val s0_final_vaddr       = s0_req_vaddr.head
  val s0_final_vsetIdx     = s0_req_vsetIdx.head
  val s0_final_only_first  = s0_only_first.head
  val s0_final_double_line = s0_double_line.head
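  // fromFtqReq carries partWayNum + 1 duplicated copies of the same request:
  // the first partWayNum copies (".init") feed the banked data array, the first copy
  // (".head") feeds the meta array, and the last copy (".last") feeds the ITLB,
  // presumably so that each consumer is driven from its own register for fan-out/timing.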
  /** SRAM request */
  // 0 -> metaread, 1,2,3 -> data, 3 -> code, 4 -> itlb
  // TODO: it seems like 0,1,2,3 -> dataArray(data); 3 -> dataArray(code); 0 -> metaArray; 4 -> itlb
  val ftq_req_to_data_doubleline = s0_double_line.init
  val ftq_req_to_data_vset_idx   = s0_req_vsetIdx.init
  val ftq_req_to_data_valid      = fromFtq.bits.readValid.init

  val ftq_req_to_meta_doubleline = s0_double_line.head
  val ftq_req_to_meta_vset_idx   = s0_req_vsetIdx.head

  val ftq_req_to_itlb_only_first = s0_only_first.last
  val ftq_req_to_itlb_doubleline = s0_double_line.last
  val ftq_req_to_itlb_vaddr      = s0_req_vaddr.last
  val ftq_req_to_itlb_vset_idx   = s0_req_vsetIdx.last

  /** Data request */
  for(i <- 0 until partWayNum) {
    toData.valid                := ftq_req_to_data_valid(i) && !missSwitchBit
    toData.bits(i).isDoubleLine := ftq_req_to_data_doubleline(i)
    toData.bits(i).vSetIdx      := ftq_req_to_data_vset_idx(i)
  }

  /** Meta request */
  toMeta.valid             := s0_valid && !missSwitchBit
  toMeta.bits.isDoubleLine := ftq_req_to_meta_doubleline
  toMeta.bits.vSetIdx      := ftq_req_to_meta_vset_idx

  val toITLB_s0_valid    = VecInit(Seq(s0_valid, s0_valid && ftq_req_to_itlb_doubleline))
  val toITLB_s0_size     = VecInit(Seq(3.U, 3.U)) // TODO: fix the size
  val toITLB_s0_vaddr    = ftq_req_to_itlb_vaddr
  val toITLB_s0_debug_pc = ftq_req_to_itlb_vaddr

  val itlb_can_go   = toITLB(0).ready && toITLB(1).ready
  val icache_can_go = toData.ready && toMeta.ready
  val pipe_can_go   = !missSwitchBit && s1_ready
  val s0_can_go     = itlb_can_go && icache_can_go && pipe_can_go
  s0_fire := s0_valid && s0_can_go

  //TODO: fix GTimer() condition
  fromFtq.ready := s0_can_go

  /**
    ******************************************************************************
    * ICache Stage 1
    * - get tlb resp data (exception info and physical addresses)
    * - get Meta/Data SRAM read responses (latched for pipeline stop)
    * - tag compare/hit check
    * - check ipf and piq
    ******************************************************************************
    */

  /** s1 control */
  val s1_valid = generatePipeControl(lastFire = s0_fire, thisFire = s1_fire, thisFlush = false.B, lastFlush = false.B)

  val s1_req_vaddr   = RegEnable(s0_final_vaddr, s0_fire)
  val s1_req_vsetIdx = RegEnable(s0_final_vsetIdx, s0_fire)
  val s1_only_first  = RegEnable(s0_final_only_first, s0_fire)
  val s1_double_line = RegEnable(s0_final_double_line, s0_fire)

  /** tlb request and response */
  fromITLB.foreach(_.ready := true.B)
  val s1_wait_itlb = RegInit(VecInit(Seq.fill(PortNumber)(false.B)))

  (0 until PortNumber).foreach { i =>
    when(RegNext(s0_fire) && fromITLB(i).bits.miss) {
      s1_wait_itlb(i) := true.B
    }.elsewhen(s1_wait_itlb(i) && !fromITLB(i).bits.miss) {
      s1_wait_itlb(i) := false.B
    }
  }

  val s1_need_itlb = Seq((RegNext(s0_fire) || s1_wait_itlb(0)) && fromITLB(0).bits.miss,
                         (RegNext(s0_fire) || s1_wait_itlb(1)) && fromITLB(1).bits.miss && s1_double_line)
  val toITLB_s1_valid    = s1_need_itlb
  val toITLB_s1_size     = VecInit(Seq(3.U, 3.U)) // TODO: fix the size
  val toITLB_s1_vaddr    = s1_req_vaddr
  val toITLB_s1_debug_pc = s1_req_vaddr

  // choose tlb req between s0 and s1
  for (i <- 0 until PortNumber) {
    toITLB(i).valid         := Mux(s1_need_itlb(i), toITLB_s1_valid(i), toITLB_s0_valid(i))
    toITLB(i).bits.size     := Mux(s1_need_itlb(i), toITLB_s1_size(i), toITLB_s0_size(i))
    toITLB(i).bits.vaddr    := Mux(s1_need_itlb(i), toITLB_s1_vaddr(i), toITLB_s0_vaddr(i))
    toITLB(i).bits.debug.pc := Mux(s1_need_itlb(i), toITLB_s1_debug_pc(i), toITLB_s0_debug_pc(i))
  }
  toITLB.map{port =>
    port.bits.cmd                := TlbCmd.exec
    port.bits.memidx             := DontCare
    port.bits.debug.robIdx       := DontCare
    port.bits.no_translate       := false.B
    port.bits.debug.isFirstIssue := DontCare
    port.bits.kill               := DontCare
  }
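  // The main pipe never kills an in-flight ITLB request (req_kill is tied low below) and
  // always accepts responses (fromITLB.ready is tied high); on an ITLB miss the request is
  // simply replayed from s1 (s1_wait_itlb / s1_need_itlb) until the miss is resolved, and
  // the response is then held by ResultHoldBypass until s1 can fire.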
  io.itlb.foreach(_.req_kill := false.B)

  /** tlb response latch for pipeline stop */
  // val tlb_valid_tmp = VecInit((0 until PortNumber).map(i =>
  //   (RegNext(s0_fire) || s1_wait_itlb(i)) && !fromITLB(i).bits.miss))
  val tlb_valid_tmp = VecInit(Seq((RegNext(s0_fire) || s1_wait_itlb(0)) && !fromITLB(0).bits.miss,
                                  (RegNext(s0_fire) || s1_wait_itlb(1)) && !fromITLB(1).bits.miss && s1_double_line))
  val tlbRespPAddr = VecInit((0 until PortNumber).map(i =>
    ResultHoldBypass(valid = tlb_valid_tmp(i), data = fromITLB(i).bits.paddr(0))))
  val tlbExcpPF = VecInit((0 until PortNumber).map(i =>
    ResultHoldBypass(valid = tlb_valid_tmp(i), data = fromITLB(i).bits.excp(0).pf.instr)))
  val tlbExcpAF = VecInit((0 until PortNumber).map(i =>
    ResultHoldBypass(valid = tlb_valid_tmp(i), data = fromITLB(i).bits.excp(0).af.instr)))
  val tlbExcp = VecInit((0 until PortNumber).map(i => tlbExcpAF(i) || tlbExcpPF(i)))

  val s1_tlb_valid = VecInit((0 until PortNumber).map(i => ValidHoldBypass(tlb_valid_tmp(i), s1_fire)))
  val tlbRespAllValid = s1_tlb_valid(0) && (!s1_double_line || s1_double_line && s1_tlb_valid(1))


  def numOfStage = 3
  val itlbMissStage = RegInit(VecInit(Seq.fill(numOfStage - 1)(0.B)))
  itlbMissStage(0) := !tlbRespAllValid
  for (i <- 1 until numOfStage - 1) {
    itlbMissStage(i) := itlbMissStage(i - 1)
  }

  /** s1 hit check/tag compare */
  val s1_req_paddr = tlbRespPAddr
  val s1_req_ptags = VecInit(s1_req_paddr.map(get_phy_tag(_)))

  val s1_meta_ptags  = ResultHoldBypass(data = metaResp.tags, valid = RegNext(s0_fire))
  val s1_meta_valids = ResultHoldBypass(data = metaResp.entryValid, valid = RegNext(s0_fire))
  val s1_meta_errors = ResultHoldBypass(data = metaResp.errors, valid = RegNext(s0_fire))

  val s1_data_cacheline = ResultHoldBypass(data = dataResp.datas, valid = RegNext(s0_fire))
  val s1_data_errorBits = ResultHoldBypass(data = dataResp.codes, valid = RegNext(s0_fire))

  val s1_tag_eq_vec    = VecInit((0 until PortNumber).map( p => VecInit((0 until nWays).map( w => s1_meta_ptags(p)(w) === s1_req_ptags(p) ))))
  val s1_tag_match_vec = VecInit((0 until PortNumber).map( k => VecInit(s1_tag_eq_vec(k).zipWithIndex.map{ case(way_tag_eq, w) => way_tag_eq && s1_meta_valids(k)(w) /*s1_meta_cohs(k)(w).isValid()*/})))
  val s1_tag_match     = VecInit(s1_tag_match_vec.map(vector => ParallelOR(vector)))

  val s1_port_hit  = VecInit(Seq( s1_tag_match(0) && s1_valid && !tlbExcp(0),  s1_tag_match(1) && s1_valid && s1_double_line && !tlbExcp(1) ))
  val s1_bank_miss = VecInit(Seq(!s1_tag_match(0) && s1_valid && !tlbExcp(0), !s1_tag_match(1) && s1_valid && s1_double_line && !tlbExcp(1) ))
  val s1_hit       = (s1_port_hit(0) && s1_port_hit(1)) || (!s1_double_line && s1_port_hit(0))

  /** choose victim cacheline */
  val replacers = Seq.fill(PortNumber)(ReplacementPolicy.fromString(cacheParams.replacer, nWays, nSets/PortNumber))
  val s1_victim_oh = ResultHoldBypass(data = VecInit(replacers.zipWithIndex.map{case (replacer, i) => UIntToOH(replacer.way(s1_req_vsetIdx(i)(highestIdxBit, 1)))}), valid = RegNext(s0_fire))


  // when(s1_fire){
  //   // when (!(PopCount(s1_tag_match_vec(0)) <= 1.U && (PopCount(s1_tag_match_vec(1)) <= 1.U || !s1_double_line))) {
  //   //   printf("Multiple hit in main pipe\n")
  //   // }
  //   assert(PopCount(s1_tag_match_vec(0)) <= 1.U && (PopCount(s1_tag_match_vec(1)) <= 1.U || !s1_double_line),
  //     "Multiple hit in main pipe, port0:is=%d,ptag=0x%x,vidx=0x%x,vaddr=0x%x port1:is=%d,ptag=0x%x,vidx=0x%x,vaddr=0x%x ",
  //     PopCount(s1_tag_match_vec(0)) > 1.U, s1_req_ptags(0), get_idx(s1_req_vaddr(0)), s1_req_vaddr(0),
  //     PopCount(s1_tag_match_vec(1)) > 1.U && s1_double_line, s1_req_ptags(1), get_idx(s1_req_vaddr(1)), s1_req_vaddr(1))
  // }
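  /** victim way selection and replacer maintenance */
  // The two replacers (one per port, each covering nSets/PortNumber sets) are trained
  // from stage 2 via touch_sets/touch_ways below (hit way on a hit, refilled victim way
  // on a miss), and IPFReplacer exposes replacers(0) so the prefetch buffer can query a
  // victim waymask for its own vsetIdx.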
  ((replacers zip touch_sets) zip touch_ways).map{case ((r, s), w) => r.access(s, w)}
  IPFReplacer.waymask := UIntToOH(replacers(0).way(IPFReplacer.vsetIdx))

  /** check ipf, get result at the same cycle */
  (0 until PortNumber).foreach { i =>
    toIPF(i).valid      := tlb_valid_tmp(i)
    toIPF(i).bits.paddr := s1_req_paddr(i)
  }
  val s1_ipf_hit       = VecInit((0 until PortNumber).map(i => toIPF(i).valid && fromIPF(i).ipf_hit))
  val s1_ipf_hit_latch = VecInit((0 until PortNumber).map(i => holdReleaseLatch(valid = s1_ipf_hit(i), release = s1_fire, flush = false.B)))
  val s1_ipf_data      = VecInit((0 until PortNumber).map(i => ResultHoldBypass(data = fromIPF(i).cacheline, valid = s1_ipf_hit(i))))

  /** check PIQ: on a hit, wait until the prefetched data comes back */
  (0 until PortNumber).foreach { i =>
    toPIQ(i).valid      := tlb_valid_tmp(i)
    toPIQ(i).bits.paddr := s1_req_paddr(i)
  }
  val s1_piq_hit       = VecInit((0 until PortNumber).map(i => toIPF(i).valid && fromPIQ(i).piq_hit))
  val s1_piq_hit_latch = VecInit((0 until PortNumber).map(i => holdReleaseLatch(valid = s1_piq_hit(i), release = s1_fire, flush = false.B)))
  val wait_piq         = VecInit((0 until PortNumber).map(i => toIPF(i).valid && fromPIQ(i).piq_hit && !fromPIQ(i).data_valid))
  val wait_piq_latch   = VecInit((0 until PortNumber).map(i => holdReleaseLatch(valid = wait_piq(i), release = s1_fire || fromPIQ(i).data_valid, flush = false.B)))
  val s1_piq_data      = VecInit((0 until PortNumber).map(i => ResultHoldBypass(data = fromPIQ(i).cacheline, valid = (s1_piq_hit(i) || wait_piq_latch(i)) && fromPIQ(i).data_valid)))

  val s1_wait = (0 until PortNumber).map(i => wait_piq_latch(i) && !fromPIQ(i).data_valid).reduce(_||_)

  val s1_prefetch_hit      = VecInit((0 until PortNumber).map(i => s1_ipf_hit_latch(i) || s1_piq_hit_latch(i)))
  val s1_prefetch_hit_data = VecInit((0 until PortNumber).map(i => Mux(s1_ipf_hit_latch(i), s1_ipf_data(i), s1_piq_data(i))))

  s1_ready := s2_ready && tlbRespAllValid && !s1_wait || !s1_valid
  s1_fire  := s1_valid && tlbRespAllValid && s2_ready && !s1_wait

  if (env.EnableDifftest) {
    (0 until PortNumber).foreach { i =>
      val diffPIQ = DifftestModule(new DiffRefillEvent, dontCare = true)
      diffPIQ.coreid := io.hartId
      diffPIQ.index  := (i + 7).U
      if (i == 0) diffPIQ.valid := s1_fire && !s1_port_hit(i) && !s1_ipf_hit_latch(i) && s1_piq_hit_latch(i) && !tlbExcp(0)
      else        diffPIQ.valid := s1_fire && !s1_port_hit(i) && !s1_ipf_hit_latch(i) && s1_piq_hit_latch(i) && s1_double_line && !tlbExcp(0) && !tlbExcp(1)
      diffPIQ.addr  := s1_req_paddr(i)
      diffPIQ.data  := s1_piq_data(i).asTypeOf(diffPIQ.data)
      diffPIQ.idtfr := DontCare
    }
  }

  /** <PERF> replace victim way number */

  (0 until nWays).map{ w =>
    XSPerfAccumulate("line_0_hit_way_" + Integer.toString(w, 10), s1_fire && s1_port_hit(0) && OHToUInt(s1_tag_match_vec(0)) === w.U)
  }

  (0 until nWays).map{ w =>
    XSPerfAccumulate("line_0_victim_way_" + Integer.toString(w, 10), s1_fire && !s1_port_hit(0) && OHToUInt(s1_victim_oh(0)) === w.U)
  }
  (0 until nWays).map{ w =>
    XSPerfAccumulate("line_1_hit_way_" + Integer.toString(w, 10), s1_fire && s1_double_line && s1_port_hit(1) && OHToUInt(s1_tag_match_vec(1)) === w.U)
  }

  (0 until nWays).map{ w =>
    XSPerfAccumulate("line_1_victim_way_" + Integer.toString(w, 10), s1_fire && s1_double_line && !s1_port_hit(1) && OHToUInt(s1_victim_oh(1)) === w.U)
  }

  XSPerfAccumulate("mainPipe_stage1_block_by_piq_cycles", s1_valid && s1_wait)

  /**
    ******************************************************************************
    * ICache Stage 2
    * - send request to MSHR if ICache miss
    * - generate secondary miss status/data registers
    * - response to IFU
    ******************************************************************************
    */

  /** s2 control */
  val s2_fetch_finish = Wire(Bool())

  val s2_valid          = generatePipeControl(lastFire = s1_fire, thisFire = s2_fire, thisFlush = false.B, lastFlush = false.B)
  val s2_miss_available = Wire(Bool())

  s2_ready := (s2_valid && s2_fetch_finish && !io.respStall) || (!s2_valid && s2_miss_available)
  s2_fire  := s2_valid && s2_fetch_finish && !io.respStall

  /** s2 data */
  // val mmio = fromPMP.map(port => port.mmio) // TODO: handle it
  val (s2_req_paddr, s2_req_vaddr) = (RegEnable(s1_req_paddr, s1_fire), RegEnable(s1_req_vaddr, s1_fire))
  val s2_req_vsetIdx   = RegEnable(s1_req_vsetIdx, s1_fire)
  val s2_req_ptags     = RegEnable(s1_req_ptags, s1_fire)
  val s2_only_first    = RegEnable(s1_only_first, s1_fire)
  val s2_double_line   = RegEnable(s1_double_line, s1_fire)
  val s2_hit           = RegEnable(s1_hit, s1_fire)
  val s2_port_hit      = RegEnable(s1_port_hit, s1_fire)
  val s2_bank_miss     = RegEnable(s1_bank_miss, s1_fire)
  val s2_waymask       = RegEnable(s1_victim_oh, s1_fire)
  val s2_tag_match_vec = RegEnable(s1_tag_match_vec, s1_fire)
  val s2_prefetch_hit        = RegEnable(s1_prefetch_hit, s1_fire)
  val s2_prefetch_hit_data   = RegEnable(s1_prefetch_hit_data, s1_fire)
  val s2_prefetch_hit_in_ipf = RegEnable(s1_ipf_hit_latch, s1_fire)
  val s2_prefetch_hit_in_piq = RegEnable(s1_piq_hit_latch, s1_fire)

  val icacheMissStage = RegInit(VecInit(Seq.fill(numOfStage - 2)(0.B)))
  icacheMissStage(0) := !s2_hit

  /** send req info of s1 and s2 to IPrefetchPipe to filter requests */
  toIPrefetch.s1Info(0).paddr := s1_req_paddr(0)
  toIPrefetch.s1Info(0).valid := s1_valid
  toIPrefetch.s1Info(1).paddr := s1_req_paddr(1)
  toIPrefetch.s1Info(1).valid := s1_valid && s1_double_line
  toIPrefetch.s2Info(0).paddr := s2_req_paddr(0)
  toIPrefetch.s2Info(0).valid := s2_valid
  toIPrefetch.s2Info(1).paddr := s2_req_paddr(1)
  toIPrefetch.s2Info(1).valid := s2_valid && s2_double_line

  assert(RegNext(!s2_valid || s2_req_paddr(0)(11,0) === s2_req_vaddr(0)(11,0), true.B))

  /** these signals imply that s2 is a secondary miss (no need to resend the miss request) */
  val sec_meet_vec = Wire(Vec(2, Bool()))
  val s2_fixed_hit_vec = VecInit((0 until 2).map(i => s2_port_hit(i) || s2_prefetch_hit(i) || sec_meet_vec(i)))
  val s2_fixed_hit = (s2_valid && s2_fixed_hit_vec(0) && s2_fixed_hit_vec(1) && s2_double_line) || (s2_valid && s2_fixed_hit_vec(0) && !s2_double_line)

  val s2_meta_errors    = RegEnable(s1_meta_errors, s1_fire)
  val s2_data_errorBits = RegEnable(s1_data_errorBits, s1_fire)
  val s2_data_cacheline = RegEnable(s1_data_cacheline, s1_fire)

  val s2_data_errors = Wire(Vec(PortNumber, Vec(nWays, Bool())))
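  // Per-port data parity/ECC check: for every way, each data unit is re-joined with its
  // stored check code and decoded with cacheParams.dataCode. The decode result is staged
  // with RegNext so it lines up with RegNext(RegNext(s1_fire)), i.e. a data error is
  // reported one cycle after the cacheline reaches stage 2; port 1 is additionally
  // qualified with the double-line flag.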
  (0 until PortNumber).map{ i =>
    val read_datas = s2_data_cacheline(i).asTypeOf(Vec(nWays, Vec(dataCodeUnitNum, UInt(dataCodeUnit.W))))
    val read_codes = s2_data_errorBits(i).asTypeOf(Vec(nWays, Vec(dataCodeUnitNum, UInt(dataCodeBits.W))))
    val data_full_wayBits = VecInit((0 until nWays).map( w =>
                              VecInit((0 until dataCodeUnitNum).map(u =>
                                Cat(read_codes(w)(u), read_datas(w)(u))))))
    val data_error_wayBits = VecInit((0 until nWays).map( w =>
                               VecInit((0 until dataCodeUnitNum).map(u =>
                                 cacheParams.dataCode.decode(data_full_wayBits(w)(u)).error ))))
    if(i == 0){
      (0 until nWays).map{ w =>
        s2_data_errors(i)(w) := RegNext(RegNext(s1_fire)) && RegNext(data_error_wayBits(w)).reduce(_||_)
      }
    } else {
      (0 until nWays).map{ w =>
        s2_data_errors(i)(w) := RegNext(RegNext(s1_fire)) && RegNext(RegNext(s1_double_line)) && RegNext(data_error_wayBits(w)).reduce(_||_)
      }
    }
  }

  val s2_parity_meta_error = VecInit((0 until PortNumber).map(i => s2_meta_errors(i).reduce(_||_) && io.csr_parity_enable))
  val s2_parity_data_error = VecInit((0 until PortNumber).map(i => s2_data_errors(i).reduce(_||_) && io.csr_parity_enable))
  val s2_parity_error      = VecInit((0 until PortNumber).map(i => RegNext(s2_parity_meta_error(i)) || s2_parity_data_error(i)))

  for(i <- 0 until PortNumber){
    io.errors(i).valid         := RegNext(s2_parity_error(i) && RegNext(RegNext(s1_fire)))
    io.errors(i).report_to_beu := RegNext(s2_parity_error(i) && RegNext(RegNext(s1_fire)))
    io.errors(i).paddr         := RegNext(RegNext(s2_req_paddr(i)))
    io.errors(i).source        := DontCare
    io.errors(i).source.tag    := RegNext(RegNext(s2_parity_meta_error(i)))
    io.errors(i).source.data   := RegNext(s2_parity_data_error(i))
    io.errors(i).source.l2     := false.B
    io.errors(i).opType        := DontCare
    io.errors(i).opType.fetch  := true.B
  }
  XSError(s2_parity_error.reduce(_||_) && RegNext(RegNext(s1_fire)), "ICache has parity error in MainPipe!")


  /** exception and pmp logic **/
  val s2_tlb_valid = VecInit((0 until PortNumber).map(i => ValidHold(s1_tlb_valid(i) && s1_fire, s2_fire, false.B)))
  val pmpExcpAF    = VecInit(Seq(fromPMP(0).instr && s2_tlb_valid(0), fromPMP(1).instr && s2_double_line && s2_tlb_valid(1)))
  // exception information and mmio
  // short delay exception signal
  val s2_except_tlb_pf = RegEnable(tlbExcpPF, s1_fire)
  val s2_except_tlb_af = RegEnable(tlbExcpAF, s1_fire)
  // long delay exception signal
  val s2_except_pmp_af = DataHoldBypass(pmpExcpAF, RegNext(s1_fire))

  val s2_except     = VecInit(Seq(s2_except_tlb_pf(0) || s2_except_tlb_af(0), s2_double_line && (s2_except_tlb_pf(1) || s2_except_tlb_af(1))))
  val s2_has_except = s2_valid && s2_except.reduce(_||_)
  val s2_mmio       = s2_valid && DataHoldBypass(io.pmp(0).resp.mmio && !s2_except(0) && !s2_except_pmp_af(0), RegNext(s1_fire)).asBool
  // pmp port
  io.pmp.zipWithIndex.map { case (p, i) =>
    p.req.valid     := s2_valid && !missSwitchBit
    p.req.bits.addr := s2_req_paddr(i)
    p.req.bits.size := 3.U // TODO
    p.req.bits.cmd  := TlbCmd.exec
  }

  /*** cacheline miss logic ***/
  val wait_idle :: wait_queue_ready :: wait_send_req :: wait_two_resp :: wait_0_resp :: wait_1_resp :: wait_one_resp :: wait_finish :: wait_pmp_except :: Nil = Enum(9)
  val wait_state = RegInit(wait_idle)

  // val port_miss_fix = VecInit(Seq(fromMSHR(0).fire && !s2_port_hit(0), fromMSHR(1).fire && s2_double_line && !s2_port_hit(1) ))

  // secondary miss record registers
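  // Each port owns a MissSlot that remembers the set index, physical tag, refilled data
  // and corruption flag of its most recent miss. If a later s2 request misses on a line
  // that a slot has just refilled (a "secondary miss"), the data is taken from the slot
  // via reservedRefillData instead of sending a new request to the MissUnit
  // (see sec_meet_vec / getMissSituat below).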
  class MissSlot(implicit p: Parameters) extends ICacheBundle {
    val m_vSetIdx = UInt(idxBits.W)
    val m_pTag    = UInt(tagBits.W)
    val m_data    = UInt(blockBits.W)
    val m_corrupt = Bool()
  }

  val missSlot = Seq.fill(2)(RegInit(0.U.asTypeOf(new MissSlot)))
  val m_invalid :: m_valid :: m_refilled :: m_flushed :: m_wait_sec_miss :: m_check_final :: Nil = Enum(6)
  val missStateQueue = RegInit(VecInit(Seq.fill(2)(m_invalid)))
  val reservedRefillData = Wire(Vec(2, UInt(blockBits.W)))

  s2_miss_available := VecInit(missStateQueue.map(entry => entry === m_invalid || entry === m_wait_sec_miss)).reduce(_&&_)

  // check miss slot
  val fix_sec_miss    = Wire(Vec(4, Bool()))
  val sec_meet_0_miss = fix_sec_miss(0) || fix_sec_miss(2)
  val sec_meet_1_miss = fix_sec_miss(1) || fix_sec_miss(3)
  sec_meet_vec := VecInit(Seq(sec_meet_0_miss, sec_meet_1_miss))

  /*** miss/hit pattern: <Control Signal> only raised in the first cycle of s2_valid ***/
  val cacheline_0_hit  = (s2_port_hit(0) || s2_prefetch_hit(0) || sec_meet_0_miss)
  val cacheline_0_miss = !s2_port_hit(0) && !s2_prefetch_hit(0) && !sec_meet_0_miss

  val cacheline_1_hit  = (s2_port_hit(1) || s2_prefetch_hit(1) || sec_meet_1_miss)
  val cacheline_1_miss = !s2_port_hit(1) && !s2_prefetch_hit(1) && !sec_meet_1_miss

  val only_0_miss   = RegNext(s1_fire) && cacheline_0_miss && !s2_double_line && !s2_has_except && !s2_mmio
  val only_0_hit    = RegNext(s1_fire) && cacheline_0_hit && !s2_double_line && !s2_mmio
  val hit_0_hit_1   = RegNext(s1_fire) && cacheline_0_hit && cacheline_1_hit && s2_double_line && !s2_mmio
  val hit_0_miss_1  = RegNext(s1_fire) && cacheline_0_hit && cacheline_1_miss && s2_double_line && !s2_has_except && !s2_mmio
  val miss_0_hit_1  = RegNext(s1_fire) && cacheline_0_miss && cacheline_1_hit && s2_double_line && !s2_has_except && !s2_mmio
  val miss_0_miss_1 = RegNext(s1_fire) && cacheline_0_miss && cacheline_1_miss && s2_double_line && !s2_has_except && !s2_mmio

  val hit_0_except_1  = RegNext(s1_fire) && s2_double_line && !s2_except(0) && s2_except(1) && cacheline_0_hit
  val miss_0_except_1 = RegNext(s1_fire) && s2_double_line && !s2_except(0) && s2_except(1) && cacheline_0_miss
  val except_0        = RegNext(s1_fire) && s2_except(0)

  /*** miss/hit pattern latch: <Control Signal> latch the miss/hit pattern if the pipeline stalls ***/
  val only_0_miss_latch   = holdReleaseLatch(valid = only_0_miss, release = s2_fire, flush = false.B)
  val only_0_hit_latch    = holdReleaseLatch(valid = only_0_hit, release = s2_fire, flush = false.B)
  val hit_0_hit_1_latch   = holdReleaseLatch(valid = hit_0_hit_1, release = s2_fire, flush = false.B)
  val hit_0_miss_1_latch  = holdReleaseLatch(valid = hit_0_miss_1, release = s2_fire, flush = false.B)
  val miss_0_hit_1_latch  = holdReleaseLatch(valid = miss_0_hit_1, release = s2_fire, flush = false.B)
  val miss_0_miss_1_latch = holdReleaseLatch(valid = miss_0_miss_1, release = s2_fire, flush = false.B)

  val hit_0_except_1_latch  = holdReleaseLatch(valid = hit_0_except_1, release = s2_fire, flush = false.B)
  val miss_0_except_1_latch = holdReleaseLatch(valid = miss_0_except_1, release = s2_fire, flush = false.B)
  val except_0_latch        = holdReleaseLatch(valid = except_0, release = s2_fire, flush = false.B)

  /*** secondary miss judgment ***/
  def waitSecondComeIn(missState: UInt): Bool = (missState === m_wait_sec_miss)

  def getMissSituat(slotNum: Int, missNum: Int): Bool = {
    RegNext(s1_fire) &&
    RegNext(missSlot(slotNum).m_vSetIdx === s1_req_vsetIdx(missNum)) &&
    RegNext(missSlot(slotNum).m_pTag === s1_req_ptags(missNum)) &&
    !s2_port_hit(missNum) && !s2_prefetch_hit(missNum) &&
    waitSecondComeIn(missStateQueue(slotNum))
  }
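  // miss_<slot>_s2_<port>: the request on s2 port <port> misses, but miss slot <slot>
  // already holds the same set/tag (it is in m_wait_sec_miss), so the refilled data can
  // be reused instead of issuing another miss request.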
  /*** compare the new req with the last req saved in the miss slot ***/
  val miss_0_s2_0 = getMissSituat(slotNum = 0, missNum = 0)
  val miss_0_s2_1 = getMissSituat(slotNum = 0, missNum = 1)
  val miss_1_s2_0 = getMissSituat(slotNum = 1, missNum = 0)
  val miss_1_s2_1 = getMissSituat(slotNum = 1, missNum = 1)

  val miss_0_s2_0_latch = holdReleaseLatch(valid = miss_0_s2_0, release = s2_fire, flush = false.B)
  val miss_0_s2_1_latch = holdReleaseLatch(valid = miss_0_s2_1, release = s2_fire, flush = false.B)
  val miss_1_s2_0_latch = holdReleaseLatch(valid = miss_1_s2_0, release = s2_fire, flush = false.B)
  val miss_1_s2_1_latch = holdReleaseLatch(valid = miss_1_s2_1, release = s2_fire, flush = false.B)

  val slot_0_solve = fix_sec_miss(0) || fix_sec_miss(1)
  val slot_1_solve = fix_sec_miss(2) || fix_sec_miss(3)
  val slot_slove   = VecInit(Seq(slot_0_solve, slot_1_solve))
  fix_sec_miss := VecInit(Seq(miss_0_s2_0_latch, miss_0_s2_1_latch, miss_1_s2_0_latch, miss_1_s2_1_latch))

  /*** reserved data for secondary miss ***/
  reservedRefillData(0) := DataHoldBypass(data = missSlot(0).m_data, valid = miss_0_s2_0 || miss_0_s2_1)
  reservedRefillData(1) := DataHoldBypass(data = missSlot(1).m_data, valid = miss_1_s2_0 || miss_1_s2_1)

  /*** miss state machine ***/

  // deal with PMP access faults on lines that do not hit in the cache
  val only_pmp_af = Wire(Vec(2, Bool()))
  only_pmp_af(0) := s2_except_pmp_af(0) && cacheline_0_miss && !s2_except(0) && s2_valid
  only_pmp_af(1) := s2_except_pmp_af(1) && cacheline_1_miss && !s2_except(1) && s2_valid && s2_double_line
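  // Miss-handling FSM overview:
  //   wait_idle        : classify the latched miss pattern; PMP-fault/MMIO cases skip straight
  //                      to wait_finish, miss cases advance once the needed MSHR port(s) are ready
  //   wait_queue_ready : the miss request(s) are presented to the MissUnit in this state (see toMSHR below)
  //   wait_send_req    : decide whether one or two refill responses are expected
  //   wait_one_resp / wait_two_resp / wait_0_resp / wait_1_resp : collect the refill response(s)
  //   wait_finish      : all needed cachelines are available; return to wait_idle once s2 fires
  //   (wait_pmp_except is declared in the Enum above but not referenced in the transitions below)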
  switch(wait_state){
    is(wait_idle){
      when(only_pmp_af(0) || only_pmp_af(1) || s2_mmio){
        // Should not send a req to the MissUnit when there is a PMP access exception.
        // But to avoid using the pmp exception in a control signal (like s2_fire), delay it by 1 cycle.
        // NOTE: a cacheline with a pmp exception could also hit in the ICache, but the result is meaningless. Just deliver the exception signals.
        wait_state := wait_finish
      }.elsewhen(miss_0_except_1_latch){
        wait_state := Mux(toMSHR(0).ready, wait_queue_ready, wait_idle)
      }.elsewhen(only_0_miss_latch || miss_0_hit_1_latch){
        wait_state := Mux(toMSHR(0).ready, wait_queue_ready, wait_idle)
      }.elsewhen(hit_0_miss_1_latch){
        wait_state := Mux(toMSHR(1).ready, wait_queue_ready, wait_idle)
      }.elsewhen(miss_0_miss_1_latch){
        wait_state := Mux(toMSHR(0).ready && toMSHR(1).ready, wait_queue_ready, wait_idle)
      }
    }

    is(wait_queue_ready){
      wait_state := wait_send_req
    }

    is(wait_send_req) {
      when(miss_0_except_1_latch || only_0_miss_latch || hit_0_miss_1_latch || miss_0_hit_1_latch){
        wait_state := wait_one_resp
      }.elsewhen(miss_0_miss_1_latch){
        wait_state := wait_two_resp
      }
    }

    is(wait_one_resp) {
      when((miss_0_except_1_latch || only_0_miss_latch || miss_0_hit_1_latch) && fromMSHR(0).fire){
        wait_state := wait_finish
      }.elsewhen(hit_0_miss_1_latch && fromMSHR(1).fire){
        wait_state := wait_finish
      }
    }

    is(wait_two_resp) {
      when(fromMSHR(0).fire && fromMSHR(1).fire){
        wait_state := wait_finish
      }.elsewhen(!fromMSHR(0).fire && fromMSHR(1).fire){
        wait_state := wait_0_resp
      }.elsewhen(fromMSHR(0).fire && !fromMSHR(1).fire){
        wait_state := wait_1_resp
      }
    }

    is(wait_0_resp) {
      when(fromMSHR(0).fire){
        wait_state := wait_finish
      }
    }

    is(wait_1_resp) {
      when(fromMSHR(1).fire){
        wait_state := wait_finish
      }
    }

    is(wait_finish) {
      when(s2_fire) { wait_state := wait_idle }
    }
  }


  /*** send request to MissUnit ***/

  (0 until 2).map { i =>
    if(i == 1) toMSHR(i).valid := (hit_0_miss_1_latch || miss_0_miss_1_latch) && wait_state === wait_queue_ready && !s2_mmio
    else       toMSHR(i).valid := (only_0_miss_latch || miss_0_hit_1_latch || miss_0_miss_1_latch || miss_0_except_1_latch) && wait_state === wait_queue_ready && !s2_mmio
    toMSHR(i).bits.paddr   := s2_req_paddr(i)
    toMSHR(i).bits.vaddr   := s2_req_vaddr(i)
    toMSHR(i).bits.waymask := s2_waymask(i)


    when(toMSHR(i).fire && missStateQueue(i) === m_invalid){
      missStateQueue(i)     := m_valid
      missSlot(i).m_vSetIdx := s2_req_vsetIdx(i)
      missSlot(i).m_pTag    := get_phy_tag(s2_req_paddr(i))
    }

    when(fromMSHR(i).fire && missStateQueue(i) === m_valid){
      missStateQueue(i)     := m_refilled
      missSlot(i).m_data    := fromMSHR(i).bits.data
      missSlot(i).m_corrupt := fromMSHR(i).bits.corrupt
    }


    when(s2_fire && missStateQueue(i) === m_refilled){
      missStateQueue(i) := m_wait_sec_miss
    }

    /*** Only the first cycle of s2 checks whether a secondary miss is met ***/
    when(missStateQueue(i) === m_wait_sec_miss){
      /*** The secondary req has been fixed by this slot and the other port also hits || the secondary req is for another cacheline and hits ***/
      when((slot_slove(i) && s2_fire) || (!slot_slove(i) && s2_fire)) {
        missStateQueue(i) := m_invalid
      }
      /*** The secondary req has been fixed by this slot but the other port misses / f3 is not ready || the secondary req is for another cacheline and misses ***/
      .elsewhen((slot_slove(i) && !s2_fire && s2_valid) || (s2_valid && !slot_slove(i) && !s2_fire)){
        missStateQueue(i) := m_check_final
      }
    }

    when(missStateQueue(i) === m_check_final && toMSHR(i).fire){
      missStateQueue(i)     := m_valid
      missSlot(i).m_vSetIdx := s2_req_vsetIdx(i)
      missSlot(i).m_pTag    := get_phy_tag(s2_req_paddr(i))
    }.elsewhen(missStateQueue(i) === m_check_final) {
      missStateQueue(i) := m_invalid
    }
  }
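  // While any miss request is outstanding, missSwitchBit blocks new requests from entering
  // the pipeline (the s0 meta/data requests and pipe_can_go are gated on !missSwitchBit);
  // it is cleared once the current fetch finishes.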
  when(toMSHR.map(_.valid).reduce(_||_)){
    missSwitchBit := true.B
  }.elsewhen(missSwitchBit && s2_fetch_finish){
    missSwitchBit := false.B
  }

  (0 until PortNumber).foreach{ i =>
    toIPrefetch.missSlot(i).valid   := missStateQueue(i) =/= m_invalid
    toIPrefetch.missSlot(i).vSetIdx := missSlot(i).m_vSetIdx
    toIPrefetch.missSlot(i).ptag    := missSlot(i).m_pTag
  }

  val miss_all_fix = wait_state === wait_finish

  s2_fetch_finish := ((s2_valid && s2_fixed_hit) || miss_all_fix || hit_0_except_1_latch || except_0_latch)

  /** update replacement status register: 0 is hit access / 1 is miss access */
  (touch_ways zip touch_sets).zipWithIndex.map{ case((t_w, t_s), i) =>
    t_s(0)       := s2_req_vsetIdx(i)(highestIdxBit, 1)
    t_w(0).valid := s2_valid && s2_port_hit(i)
    t_w(0).bits  := OHToUInt(s2_tag_match_vec(i))

    t_s(1)       := s2_req_vsetIdx(i)(highestIdxBit, 1)
    t_w(1).valid := s2_valid && !s2_port_hit(i)
    t_w(1).bits  := OHToUInt(s2_waymask(i))
  }

  // use the hit one-hot vector to select data
  val s2_hit_datas = VecInit(s2_data_cacheline.zipWithIndex.map { case(bank, i) =>
    val port_hit_data = Mux1H(s2_tag_match_vec(i).asUInt, bank)
    port_hit_data
  })

  val s2_register_datas = Wire(Vec(2, UInt(blockBits.W)))

  s2_register_datas.zipWithIndex.map{case(bank, i) =>
    // if(i == 0) bank := Mux(s2_port_hit(i), s2_hit_datas(i), Mux(miss_0_s2_0_latch, reservedRefillData(0), Mux(miss_1_s2_0_latch, reservedRefillData(1), missSlot(0).m_data)))
    // else       bank := Mux(s2_port_hit(i), s2_hit_datas(i), Mux(miss_0_s2_1_latch, reservedRefillData(0), Mux(miss_1_s2_1_latch, reservedRefillData(1), missSlot(1).m_data)))
    if(i == 0) bank := Mux(miss_0_s2_0_latch, reservedRefillData(0), Mux(miss_1_s2_0_latch, reservedRefillData(1), missSlot(0).m_data))
    else       bank := Mux(miss_0_s2_1_latch, reservedRefillData(0), Mux(miss_1_s2_1_latch, reservedRefillData(1), missSlot(1).m_data))
  }

  /** response to IFU */

  (0 until PortNumber).map{ i =>
    if(i == 0) toIFU(i).valid := s2_fire
    else       toIFU(i).valid := s2_fire && s2_double_line
    // when select is high, use sramData; otherwise, use registerData
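    // sramData comes from the data SRAM on a tag hit or from the prefetch buffer/PIQ,
    // while registerData comes from the miss slots (a secondary miss reuses reservedRefillData).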
    toIFU(i).bits.registerData := s2_register_datas(i)
    toIFU(i).bits.sramData     := Mux(s2_port_hit(i), s2_hit_datas(i), s2_prefetch_hit_data(i))
    toIFU(i).bits.select       := s2_port_hit(i) || s2_prefetch_hit(i)
    toIFU(i).bits.paddr        := s2_req_paddr(i)
    toIFU(i).bits.vaddr        := s2_req_vaddr(i)
    toIFU(i).bits.tlbExcp.pageFault   := s2_except_tlb_pf(i)
    toIFU(i).bits.tlbExcp.accessFault := s2_except_tlb_af(i) || missSlot(i).m_corrupt || s2_except_pmp_af(i)
    toIFU(i).bits.tlbExcp.mmio        := s2_mmio

    when(RegNext(s2_fire && missSlot(i).m_corrupt)){
      io.errors(i).valid         := true.B
      io.errors(i).report_to_beu := false.B // L2 should have reported it to the bus error unit already, no need to do it again
      io.errors(i).paddr         := RegNext(s2_req_paddr(i))
      io.errors(i).source.tag    := false.B
      io.errors(i).source.data   := false.B
      io.errors(i).source.l2     := true.B
    }
  }
  io.fetch.topdownIcacheMiss := !s2_hit
  io.fetch.topdownItlbMiss   := itlbMissStage(0)

  (0 until 2).map {i =>
    XSPerfAccumulate("port_" + i + "_only_hit_in_ipf", !s2_port_hit(i) && s2_prefetch_hit(i) && s2_fire)
  }

  io.perfInfo.only_0_hit      := only_0_hit_latch
  io.perfInfo.only_0_miss     := only_0_miss_latch
  io.perfInfo.hit_0_hit_1     := hit_0_hit_1_latch
  io.perfInfo.hit_0_miss_1    := hit_0_miss_1_latch
  io.perfInfo.miss_0_hit_1    := miss_0_hit_1_latch
  io.perfInfo.miss_0_miss_1   := miss_0_miss_1_latch
  io.perfInfo.hit_0_except_1  := hit_0_except_1_latch
  io.perfInfo.miss_0_except_1 := miss_0_except_1_latch
  io.perfInfo.except_0        := except_0_latch
  io.perfInfo.bank_hit(0)     := only_0_miss_latch || hit_0_hit_1_latch || hit_0_miss_1_latch || hit_0_except_1_latch
  io.perfInfo.bank_hit(1)     := miss_0_hit_1_latch || hit_0_hit_1_latch
  io.perfInfo.hit             := hit_0_hit_1_latch || only_0_hit_latch || hit_0_except_1_latch || except_0_latch

  /** <PERF> fetch bubble generated by icache miss */

  XSPerfAccumulate("icache_bubble_s2_miss", s2_valid && !s2_fetch_finish)

  // TODO: this perf is wrong!
  val tlb_miss_vec = VecInit((0 until PortNumber).map(i => toITLB(i).valid && s0_can_go && fromITLB(i).bits.miss))
  val tlb_has_miss = tlb_miss_vec.reduce(_ || _)
  XSPerfAccumulate("icache_bubble_s0_tlb_miss", s0_valid && tlb_has_miss)

  if (env.EnableDifftest) {
    val discards = (0 until PortNumber).map { i =>
      val discard = toIFU(i).bits.tlbExcp.pageFault || toIFU(i).bits.tlbExcp.accessFault || toIFU(i).bits.tlbExcp.mmio
      discard
    }
    (0 until PortNumber).map { i =>
      val diffMainPipeOut = DifftestModule(new DiffRefillEvent, dontCare = true)
      diffMainPipeOut.coreid := io.hartId
      diffMainPipeOut.index  := (4 + i).U
      if (i == 0) diffMainPipeOut.valid := s2_fire && !discards(0)
      else        diffMainPipeOut.valid := s2_fire && s2_double_line && !discards(0) && !discards(1)
      diffMainPipeOut.addr := s2_req_paddr(i)
      when (toIFU(i).bits.select.asBool) {
        diffMainPipeOut.data := toIFU(i).bits.sramData.asTypeOf(diffMainPipeOut.data)
      } .otherwise {
        diffMainPipeOut.data := toIFU(i).bits.registerData.asTypeOf(diffMainPipeOut.data)
      }
      // idtfr: 1 -> data from icache, 2 -> data from ipf, 3 -> data from piq, 4 -> data from missUnit
      when (s2_port_hit(i)) { diffMainPipeOut.idtfr := 1.U }
      .elsewhen(s2_prefetch_hit(i)) {
        when (s2_prefetch_hit_in_ipf(i)) { diffMainPipeOut.idtfr := 2.U }
        .elsewhen(s2_prefetch_hit_in_piq(i)) { diffMainPipeOut.idtfr := 3.U }
        .otherwise { diffMainPipeOut.idtfr := DontCare; XSWarn(true.B, "should not be in this situation\n") }
      }
      .otherwise { diffMainPipeOut.idtfr := 4.U }
      diffMainPipeOut
    }
  }
}