/***************************************************************************************
* Copyright (c) 2024 Beijing Institute of Open Source Chip (BOSC)
* Copyright (c) 2020-2024 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.frontend.icache

import chisel3._
import chisel3.util._
import difftest._
import org.chipsalliance.cde.config.Parameters
import utility._
import xiangshan._
import xiangshan.backend.fu.PMPReqBundle
import xiangshan.backend.fu.PMPRespBundle
import xiangshan.cache.mmu._
import xiangshan.frontend.ExceptionType
import xiangshan.frontend.FtqToICacheRequestBundle

/** Response payload sent from the ICache main pipe (stage 2) to the IFU:
  * fetched cacheline data plus per-port address / exception / attribute info.
  */
class ICacheMainPipeResp(implicit p: Parameters) extends ICacheBundle {
  val doubleline: Bool      = Bool()
  val vaddr:      Vec[UInt] = Vec(PortNumber, UInt(VAddrBits.W))
  val data:       UInt      = UInt(blockBits.W)
  val paddr:      Vec[UInt] = Vec(PortNumber, UInt(PAddrBits.W))
  val exception:  Vec[UInt] = Vec(PortNumber, UInt(ExceptionType.width.W))
  val pmp_mmio:   Vec[Bool] = Vec(PortNumber, Bool())
  val itlb_pbmt:  Vec[UInt] = Vec(PortNumber, UInt(Pbmt.width.W))
  val backendException: Bool = Bool()
  /* NOTE: GPAddrBits(=50bit) is not enough for gpaddr here, refer to PR#3795
   * Sv48*4 only allows 50bit gpaddr, when software violates this requirement
   * it needs to fill the mtval2 register with the full XLEN(=64bit) gpaddr,
   * PAddrBitsMax(=56bit currently) is required for the frontend datapath due to the itlb ppn length limitation
   * (cases 56<x<=64 are handled by the backend datapath)
   */
  val gpaddr:            UInt = UInt(PAddrBitsMax.W)
  val isForVSnonLeafPTE: Bool = Bool()
}

/** FTQ-facing interface of the main pipe: fetch request in, response out,
  * plus top-down miss indicators for performance attribution.
  */
class ICacheMainPipeBundle(implicit p: Parameters) extends ICacheBundle {
  val req:  DecoupledIO[FtqToICacheRequestBundle] = Flipped(DecoupledIO(new FtqToICacheRequestBundle))
  val resp: Valid[ICacheMainPipeResp]             = ValidIO(new ICacheMainPipeResp)
  val topdownIcacheMiss: Bool = Output(Bool())
  val topdownItlbMiss:   Bool = Output(Bool())
}

/** Read port pair to/from the meta array. */
class ICacheMetaReqBundle(implicit p: Parameters) extends ICacheBundle {
  val toIMeta:   DecoupledIO[ICacheReadBundle] = DecoupledIO(new ICacheReadBundle)
  val fromIMeta: ICacheMetaRespBundle          = Input(new ICacheMetaRespBundle)
}

/** Read port pair to/from the data array; requests are duplicated per partWay. */
class ICacheDataReqBundle(implicit p: Parameters) extends ICacheBundle {
  val toIData:   Vec[DecoupledIO[ICacheReadBundle]] = Vec(partWayNum, DecoupledIO(new ICacheReadBundle))
  val fromIData: ICacheDataRespBundle               = Input(new ICacheDataRespBundle)
}

/** Miss request / refill response channel between the main pipe and the miss unit (MSHR). */
class ICacheMSHRBundle(implicit p: Parameters) extends ICacheBundle {
  val req:  DecoupledIO[ICacheMissReq] = DecoupledIO(new ICacheMissReq)
  val resp: Valid[ICacheMissResp]      = Flipped(ValidIO(new ICacheMissResp))
}

/** PMP check channel: request out (s1), combinational response back. */
class ICachePMPBundle(implicit p: Parameters) extends ICacheBundle {
  val req:  Valid[PMPReqBundle] = ValidIO(new PMPReqBundle())
  val resp: PMPRespBundle       = Input(new PMPRespBundle())
}

/** Per-fetch performance classification (hit/miss/exception combinations of the two ports). */
class ICachePerfInfo(implicit p: Parameters) extends ICacheBundle {
  val only_0_hit:      Bool = Bool()
  val only_0_miss:     Bool = Bool()
  val hit_0_hit_1:     Bool = Bool()
  val hit_0_miss_1:    Bool = Bool()
  val miss_0_hit_1:    Bool = Bool()
  val miss_0_miss_1:   Bool = Bool()
  val hit_0_except_1:  Bool = Bool()
  val miss_0_except_1: Bool = Bool()
  val except_0:        Bool = Bool()
  val bank_hit: Vec[Bool] = Vec(PortNumber, Bool())
  val hit:      Bool      = Bool()
}

/** Full IO of [[ICacheMainPipe]], grouped into internal (arrays, wayLookup, MSHR)
  * and external (FTQ, PMP, IFU, error report) interfaces.
  */
class ICacheMainPipeInterface(implicit p: Parameters) extends ICacheBundle {
  val hartId: UInt = Input(UInt(hartIdLen.W))

  /*** internal interface ***/
  val dataArray:      ICacheDataReqBundle                  = new ICacheDataReqBundle
  val metaArrayFlush: Vec[Valid[ICacheMetaFlushBundle]]    = Vec(PortNumber, ValidIO(new ICacheMetaFlushBundle))
  val touch:          Vec[Valid[ReplacerTouch]]            = Vec(PortNumber, ValidIO(new ReplacerTouch))
  val wayLookupRead:  DecoupledIO[WayLookupInfo]           = Flipped(DecoupledIO(new WayLookupInfo))
  val mshr:           ICacheMSHRBundle                     = new ICacheMSHRBundle
  val ecc_enable:     Bool                                 = Input(Bool())

  /*** outside interface ***/
  // FTQ
  val fetch: ICacheMainPipeBundle = new ICacheMainPipeBundle
  val flush: Bool                 = Input(Bool())
  // PMP
  val pmp: Vec[ICachePMPBundle] = Vec(PortNumber, new ICachePMPBundle)
  // IFU
  val respStall: Bool = Input(Bool())
  // backend/BEU
  val errors: Vec[Valid[L1CacheErrorInfo]] = Output(Vec(PortNumber, ValidIO(new L1CacheErrorInfo)))

  /*** PERF ***/
  val perfInfo: ICachePerfInfo = Output(new ICachePerfInfo)
}

//class ICacheDB(implicit p: Parameters) extends ICacheBundle {
//  val blk_vaddr: UInt = UInt((VAddrBits - blockOffBits).W)
//  val blk_paddr: UInt = UInt((PAddrBits - blockOffBits).W)
//  val hit:       Bool = Bool()
//}

/** Three-stage ICache main pipeline:
  *  - s0: issue data-SRAM read, pop waymask/tlb info from wayLookup
  *  - s1: PMP check, latch SRAM data, meta ECC check, snoop MSHR refills
  *  - s2: data ECC check, send miss/re-fetch requests to MSHR, respond to IFU
  */
class ICacheMainPipe(implicit p: Parameters) extends ICacheModule with HasICacheECCHelper {
  val io: ICacheMainPipeInterface = IO(new ICacheMainPipeInterface)

  /** Input/Output port */
  private val (fromFtq, toIFU)   = (io.fetch.req, io.fetch.resp)
  private val (toData, fromData) = (io.dataArray.toIData, io.dataArray.fromIData)
  private val toMetaFlush        = io.metaArrayFlush
  private val (toMSHR, fromMSHR) = (io.mshr.req, io.mshr.resp)
  private val (toPMP, fromPMP)   = (io.pmp.map(_.req), io.pmp.map(_.resp))
  private val fromWayLookup      = io.wayLookupRead
  // ECC is forced on when either error-injection knob is set (elaboration-time Booleans)
  private val ecc_enable =
    if (ICacheForceMetaECCError || ICacheForceDataECCError) true.B else io.ecc_enable

  // Statistics on the frequency distribution of FTQ fire interval
  private val cntFtqFireInterval      = RegInit(0.U(32.W))
  private val cntFtqFireIntervalStart = 1
  private val cntFtqFireIntervalEnd   = 300
  cntFtqFireInterval := Mux(fromFtq.fire, 1.U, cntFtqFireInterval + 1.U)
  XSPerfHistogram(
    "ftq2icache_fire",
    cntFtqFireInterval,
    fromFtq.fire,
    cntFtqFireIntervalStart,
    cntFtqFireIntervalEnd,
    right_strict = true
  )

  /** pipeline control signal */
  val s1_ready, s2_ready           = Wire(Bool())
  val s0_fire, s1_fire, s2_fire    = Wire(Bool())
  val s0_flush, s1_flush, s2_flush = Wire(Bool())

  /**
    ******************************************************************************
    * ICache Stage 0
    * - send req to data SRAM
    * - get waymask and tlb info from wayLookup
    ******************************************************************************
    */

  /** s0 control */
  // 0,1,2,3 -> dataArray(data); 4 -> mainPipe
  // Ftq RegNext Register
  private val fromFtqReq       = fromFtq.bits.pcMemRead
  private val s0_valid         = fromFtq.valid
  private val s0_req_valid_all = (0 until partWayNum + 1).map(i => fromFtq.bits.readValid(i))
  private val s0_req_vaddr_all =
    (0 until partWayNum + 1).map(i => VecInit(Seq(fromFtqReq(i).startAddr, fromFtqReq(i).nextlineStart)))
  private val s0_req_vSetIdx_all = (0 until partWayNum + 1).map(i => VecInit(s0_req_vaddr_all(i).map(get_idx)))
  private val s0_req_offset_all  = (0 until partWayNum + 1).map(i => s0_req_vaddr_all(i)(0)(log2Ceil(blockBytes) - 1, 0))
  private val s0_doubleline_all =
    (0 until partWayNum + 1).map(i => fromFtq.bits.readValid(i) && fromFtqReq(i).crossCacheline)

  // the last entry (index partWayNum) is the main-pipe copy of the request
  private val s0_req_vaddr   = s0_req_vaddr_all.last
  private val s0_req_vSetIdx = s0_req_vSetIdx_all.last
  private val s0_doubleline  = s0_doubleline_all.last

  private val s0_backendException = fromFtq.bits.backendException

  /**
    ******************************************************************************
    * get waymask and tlb info from wayLookup
    ******************************************************************************
    */
  fromWayLookup.ready := s0_fire
  private val s0_waymasks             = VecInit(fromWayLookup.bits.waymask.map(_.asTypeOf(Vec(nWays, Bool()))))
  private val s0_req_ptags            = fromWayLookup.bits.ptag
  private val s0_req_gpaddr           = fromWayLookup.bits.gpaddr
  private val s0_req_isForVSnonLeafPTE = fromWayLookup.bits.isForVSnonLeafPTE
  private val s0_itlb_exception       = fromWayLookup.bits.itlb_exception
  private val s0_itlb_pbmt            = fromWayLookup.bits.itlb_pbmt
  private val s0_meta_codes           = fromWayLookup.bits.meta_codes
  private val s0_hits                 = VecInit(fromWayLookup.bits.waymask.map(_.orR))

  // wayLookup must be describing the same fetch as the FTQ request we are consuming
  when(s0_fire) {
    assert(
      (0 until PortNumber).map(i => s0_req_vSetIdx(i) === fromWayLookup.bits.vSetIdx(i)).reduce(_ && _),
      "vSetIdx from ftq and wayLookup mismatch! vaddr=0x%x ftq: vSet0=0x%x vSet1=0x%x wayLookup: vSet0=0x%x vSet1=0x%x",
      s0_req_vaddr(0),
      s0_req_vSetIdx(0),
      s0_req_vSetIdx(1),
      fromWayLookup.bits.vSetIdx(0),
      fromWayLookup.bits.vSetIdx(1)
    )
  }

  /**
    ******************************************************************************
    * data SRAM request
    ******************************************************************************
    */
  (0 until partWayNum).foreach { i =>
    toData(i).valid             := s0_req_valid_all(i)
    toData(i).bits.isDoubleLine := s0_doubleline_all(i)
    toData(i).bits.vSetIdx      := s0_req_vSetIdx_all(i)
    toData(i).bits.blkOffset    := s0_req_offset_all(i)
    toData(i).bits.waymask      := s0_waymasks
  }

  private val s0_can_go = toData.last.ready && fromWayLookup.valid && s1_ready
  s0_flush := io.flush
  s0_fire  := s0_valid && s0_can_go && !s0_flush

  fromFtq.ready := s0_can_go

  /**
    ******************************************************************************
    * ICache Stage 1
    * - PMP check
    * - get Data SRAM read responses (latched for pipeline stop)
    * - monitor missUnit response port
    ******************************************************************************
    */
  private val s1_valid =
    generatePipeControl(lastFire = s0_fire, thisFire = s1_fire, thisFlush = s1_flush, lastFlush = false.B)

  private val s1_req_vaddr  = RegEnable(s0_req_vaddr, 0.U.asTypeOf(s0_req_vaddr), s0_fire)
  private val s1_req_ptags  = RegEnable(s0_req_ptags, 0.U.asTypeOf(s0_req_ptags), s0_fire)
  private val s1_req_gpaddr = RegEnable(s0_req_gpaddr, 0.U.asTypeOf(s0_req_gpaddr), s0_fire)
  private val s1_req_isForVSnonLeafPTE =
    RegEnable(s0_req_isForVSnonLeafPTE, 0.U.asTypeOf(s0_req_isForVSnonLeafPTE), s0_fire)
  private val s1_doubleline       = RegEnable(s0_doubleline, 0.U.asTypeOf(s0_doubleline), s0_fire)
  private val s1_SRAMhits         = RegEnable(s0_hits, 0.U.asTypeOf(s0_hits), s0_fire)
  private val s1_itlb_exception   = RegEnable(s0_itlb_exception, 0.U.asTypeOf(s0_itlb_exception), s0_fire)
  private val s1_backendException = RegEnable(s0_backendException, false.B, s0_fire)
  private val s1_itlb_pbmt        = RegEnable(s0_itlb_pbmt, 0.U.asTypeOf(s0_itlb_pbmt), s0_fire)
  private val s1_waymasks         = RegEnable(s0_waymasks, 0.U.asTypeOf(s0_waymasks), s0_fire)
  private val s1_meta_codes       = RegEnable(s0_meta_codes, 0.U.asTypeOf(s0_meta_codes), s0_fire)

  private val s1_req_vSetIdx = s1_req_vaddr.map(get_idx)
  private val s1_req_paddr   = getPaddrFromPtag(s1_req_vaddr, s1_req_ptags)
  private val s1_req_offset  = s1_req_vaddr(0)(log2Ceil(blockBytes) - 1, 0)

  // do metaArray ECC check
  private val s1_meta_corrupt = VecInit((s1_req_ptags zip s1_meta_codes zip s1_waymasks).map {
    case ((meta, code), waymask) =>
      val hit_num = PopCount(waymask)
      // NOTE: if not hit, encodeMetaECC(meta) =/= code can also be true, but we don't care about it
      (encodeMetaECC(meta) =/= code && hit_num === 1.U) || // hit one way, but parity code does not match, ECC failure
      hit_num > 1.U                                        // hit multi-way, must be an ECC failure
  })
  // force clear meta_corrupt when parity check is disabled
  // (last-connect semantics: this overrides the VecInit assignment above)
  when(!ecc_enable) {
    s1_meta_corrupt := VecInit(Seq.fill(PortNumber)(false.B))
  }

  /**
    ******************************************************************************
    * update replacement status register
    ******************************************************************************
    */
  (0 until PortNumber).foreach { i =>
    io.touch(i).bits.vSetIdx := s1_req_vSetIdx(i)
    io.touch(i).bits.way     := OHToUInt(s1_waymasks(i))
  }
  // touch only on SRAM hit, in the first s1 cycle (RegNext(s0_fire))
  io.touch(0).valid := RegNext(s0_fire) && s1_SRAMhits(0)
  io.touch(1).valid := RegNext(s0_fire) && s1_SRAMhits(1) && s1_doubleline

  /**
    ******************************************************************************
    * PMP check
    ******************************************************************************
    */
  toPMP.zipWithIndex.foreach { case (p, i) =>
    // NOTE: if itlb has exception, paddr can be invalid, therefore pmp check could be skipped;
    //       we do not do this now, for timing reasons
    p.valid     := s1_valid // && !ExceptionType.hasException(s1_itlb_exception(i))
    p.bits.addr := s1_req_paddr(i)
    p.bits.size := 3.U
    p.bits.cmd  := TlbCmd.exec
  }
  private val s1_pmp_exception = VecInit(fromPMP.map(ExceptionType.fromPMPResp))
  private val s1_pmp_mmio      = VecInit(fromPMP.map(_.mmio))

  // merge s1 itlb/pmp exceptions, itlb has the highest priority, pmp next
  private val s1_exception_out = ExceptionType.merge(
    s1_itlb_exception,
    s1_pmp_exception
  )

  /**
    ******************************************************************************
    * select data from MSHR, SRAM
    ******************************************************************************
    */
  private val s1_MSHR_match = VecInit((0 until PortNumber).map { i =>
    (s1_req_vSetIdx(i) === fromMSHR.bits.vSetIdx) &&
    (s1_req_ptags(i) === getPhyTagFromBlk(fromMSHR.bits.blkPaddr)) &&
    fromMSHR.valid && !fromMSHR.bits.corrupt
  })
  private val s1_MSHR_hits  = Seq(s1_valid && s1_MSHR_match(0), s1_valid && (s1_MSHR_match(1) && s1_doubleline))
  private val s1_MSHR_datas = fromMSHR.bits.data.asTypeOf(Vec(ICacheDataBanks, UInt((blockBits / ICacheDataBanks).W)))

  // sticky hit flags: set by either MSHR snoop or SRAM hit, cleared on fire/flush
  private val s1_hits = (0 until PortNumber).map { i =>
    ValidHoldBypass(s1_MSHR_hits(i) || (RegNext(s0_fire) && s1_SRAMhits(i)), s1_fire || s1_flush)
  }

  // banks >= s1_bankIdxLow belong to port 0's line, banks below it to port 1's (wrap-around fetch)
  private val s1_bankIdxLow = (s1_req_offset >> log2Ceil(blockBytes / ICacheDataBanks)).asUInt
  private val s1_bankMSHRHit = VecInit((0 until ICacheDataBanks).map { i =>
    (i.U >= s1_bankIdxLow) && s1_MSHR_hits(0) ||
    (i.U < s1_bankIdxLow) && s1_MSHR_hits(1)
  })
  private val s1_datas = VecInit((0 until ICacheDataBanks).map { i =>
    DataHoldBypass(Mux(s1_bankMSHRHit(i), s1_MSHR_datas(i), fromData.datas(i)), s1_bankMSHRHit(i) || RegNext(s0_fire))
  })
  private val s1_data_is_from_MSHR = VecInit((0 until ICacheDataBanks).map { i =>
    DataHoldBypass(s1_bankMSHRHit(i), s1_bankMSHRHit(i) || RegNext(s0_fire))
  })
  private val s1_codes = DataHoldBypass(fromData.codes, RegNext(s0_fire))

  s1_flush := io.flush
  s1_ready := s2_ready || !s1_valid
  s1_fire  := s1_valid && s2_ready && !s1_flush

  /**
    ******************************************************************************
    * ICache Stage 2
    * - send request to MSHR if ICache miss
    * - monitor missUnit response port
    * - response to IFU
    ******************************************************************************
    */

  private val s2_valid =
    generatePipeControl(lastFire = s1_fire, thisFire = s2_fire, thisFlush = s2_flush, lastFlush = false.B)

  private val s2_req_vaddr  = RegEnable(s1_req_vaddr, 0.U.asTypeOf(s1_req_vaddr), s1_fire)
  private val s2_req_ptags  = RegEnable(s1_req_ptags, 0.U.asTypeOf(s1_req_ptags), s1_fire)
  private val s2_req_gpaddr = RegEnable(s1_req_gpaddr, 0.U.asTypeOf(s1_req_gpaddr), s1_fire)
  private val s2_req_isForVSnonLeafPTE =
    RegEnable(s1_req_isForVSnonLeafPTE, 0.U.asTypeOf(s1_req_isForVSnonLeafPTE), s1_fire)
  private val s2_doubleline       = RegEnable(s1_doubleline, 0.U.asTypeOf(s1_doubleline), s1_fire)
  private val s2_exception        = RegEnable(s1_exception_out, 0.U.asTypeOf(s1_exception_out), s1_fire)
  private val s2_backendException = RegEnable(s1_backendException, false.B, s1_fire)
  private val s2_pmp_mmio         = RegEnable(s1_pmp_mmio, 0.U.asTypeOf(s1_pmp_mmio), s1_fire)
  private val s2_itlb_pbmt        = RegEnable(s1_itlb_pbmt, 0.U.asTypeOf(s1_itlb_pbmt), s1_fire)
  private val s2_waymasks         = RegEnable(s1_waymasks, 0.U.asTypeOf(s1_waymasks), s1_fire)

  private val s2_req_vSetIdx = s2_req_vaddr.map(get_idx)
  private val s2_req_offset  = s2_req_vaddr(0)(log2Ceil(blockBytes) - 1, 0)
  private val s2_req_paddr   = getPaddrFromPtag(s2_req_vaddr, s2_req_ptags)

  private val s2_SRAMhits          = RegEnable(s1_SRAMhits, 0.U.asTypeOf(s1_SRAMhits), s1_fire)
  private val s2_codes             = RegEnable(s1_codes, 0.U.asTypeOf(s1_codes), s1_fire)
  private val s2_hits              = RegInit(VecInit(Seq.fill(PortNumber)(false.B)))
  private val s2_datas             = RegInit(VecInit(Seq.fill(ICacheDataBanks)(0.U((blockBits / ICacheDataBanks).W))))
  private val s2_data_is_from_MSHR = RegInit(VecInit(Seq.fill(ICacheDataBanks)(false.B)))

  /**
    ******************************************************************************
    * ECC check
    ******************************************************************************
    */
  // check data error
  private val s2_bankSel      = getBankSel(s2_req_offset, s2_valid)
  private val s2_bank_corrupt = (0 until ICacheDataBanks).map(i => encodeDataECC(s2_datas(i)) =/= s2_codes(i))
  // if data is from MSHR, we don't need to check ECC
  private val s2_data_corrupt = VecInit((0 until PortNumber).map { port =>
    (0 until ICacheDataBanks).map { bank =>
      s2_bank_corrupt(bank) && s2_bankSel(port)(bank).asBool && !s2_data_is_from_MSHR(bank)
    }.reduce(_ || _) && s2_SRAMhits(port)
  })
  // force clear data_corrupt when parity check is disabled
  when(!ecc_enable) {
    s2_data_corrupt := VecInit(Seq.fill(PortNumber)(false.B))
  }
  // meta error is checked in s1 stage
  private val s2_meta_corrupt = RegEnable(s1_meta_corrupt, 0.U.asTypeOf(s1_meta_corrupt), s1_fire)
  // send errors to top
  // TODO: support RERI spec standard interface
  (0 until PortNumber).foreach { i =>
    io.errors(i).valid              := (s2_meta_corrupt(i) || s2_data_corrupt(i)) && RegNext(s1_fire)
    io.errors(i).bits.report_to_beu := (s2_meta_corrupt(i) || s2_data_corrupt(i)) && RegNext(s1_fire)
    io.errors(i).bits.paddr         := s2_req_paddr(i)
    io.errors(i).bits.source        := DontCare
    io.errors(i).bits.source.tag    := s2_meta_corrupt(i)
    io.errors(i).bits.source.data   := s2_data_corrupt(i)
    io.errors(i).bits.source.l2     := false.B
    io.errors(i).bits.opType        := DontCare
    io.errors(i).bits.opType.fetch  := true.B
  }
  // flush metaArray to prepare for re-fetch
  (0 until PortNumber).foreach { i =>
    toMetaFlush(i).valid       := (s2_meta_corrupt(i) || s2_data_corrupt(i)) && RegNext(s1_fire)
    toMetaFlush(i).bits.virIdx := s2_req_vSetIdx(i)
    // if is meta corrupt, clear all way (since waymask may be unreliable)
    // if is data corrupt, only clear the way that has error
    toMetaFlush(i).bits.waymask := Mux(s2_meta_corrupt(i), Fill(nWays, true.B), s2_waymasks(i).asUInt)
  }
  // PERF: count the number of data parity errors
  XSPerfAccumulate("data_corrupt_0", s2_data_corrupt(0) && RegNext(s1_fire))
  XSPerfAccumulate("data_corrupt_1", s2_data_corrupt(1) && RegNext(s1_fire))
  XSPerfAccumulate("meta_corrupt_0", s2_meta_corrupt(0) && RegNext(s1_fire))
  XSPerfAccumulate("meta_corrupt_1", s2_meta_corrupt(1) && RegNext(s1_fire))
  // TEST: stop simulation if parity error is detected, and dump wave
//  val (assert_valid, assert_val) = DelayNWithValid(s2_meta_corrupt.reduce(_ || _), s2_valid, 1000)
//  assert(!(assert_valid && assert_val))
//  val (assert_valid, assert_val) = DelayNWithValid(s2_data_corrupt.reduce(_ || _), s2_valid, 1000)
//  assert(!(assert_valid && assert_val))

  /**
    ******************************************************************************
    * monitor missUnit response port
    ******************************************************************************
    */
  private val s2_MSHR_match = VecInit((0 until PortNumber).map { i =>
    (s2_req_vSetIdx(i) === fromMSHR.bits.vSetIdx) &&
    (s2_req_ptags(i) === getPhyTagFromBlk(fromMSHR.bits.blkPaddr)) &&
    fromMSHR.valid // we don't care about whether it's corrupt here
  })
  private val s2_MSHR_hits  = Seq(s2_valid && s2_MSHR_match(0), s2_valid && s2_MSHR_match(1) && s2_doubleline)
  private val s2_MSHR_datas = fromMSHR.bits.data.asTypeOf(Vec(ICacheDataBanks, UInt((blockBits / ICacheDataBanks).W)))

  private val s2_bankIdxLow = (s2_req_offset >> log2Ceil(blockBytes / ICacheDataBanks)).asUInt
  private val s2_bankMSHRHit = VecInit((0 until ICacheDataBanks).map { i =>
    ((i.U >= s2_bankIdxLow) && s2_MSHR_hits(0)) || ((i.U < s2_bankIdxLow) && s2_MSHR_hits(1))
  })

  (0 until ICacheDataBanks).foreach { i =>
    when(s1_fire) {
      s2_datas             := s1_datas
      s2_data_is_from_MSHR := s1_data_is_from_MSHR
    }.elsewhen(s2_bankMSHRHit(i)) {
      s2_datas(i) := s2_MSHR_datas(i)
      // also update s2_data_is_from_MSHR when re-fetched, to clear s2_data_corrupt flag and let s2_fire
      s2_data_is_from_MSHR(i) := true.B
    }
  }

  (0 until PortNumber).foreach { i =>
    when(s1_fire) {
      s2_hits := s1_hits
    }.elsewhen(s2_MSHR_hits(i)) {
      // update s2_hits even if it's corrupt, to let s2_fire
      s2_hits(i) := true.B
      // also clear s2_meta_corrupt flag when re-fetched, to let s2_fire
      s2_meta_corrupt(i) := false.B
    }
  }

  // latch whether the refill from L2 itself reported corruption
  private val s2_l2_corrupt = RegInit(VecInit(Seq.fill(PortNumber)(false.B)))
  (0 until PortNumber).foreach { i =>
    when(s1_fire) {
      s2_l2_corrupt(i) := false.B
    }.elsewhen(s2_MSHR_hits(i)) {
      s2_l2_corrupt(i) := fromMSHR.bits.corrupt
    }
  }

  /**
    ******************************************************************************
    * send request to MSHR if ICache miss / ECC corrupt
    ******************************************************************************
    */

  // merge pmp mmio and itlb pbmt
  private val s2_mmio = VecInit((s2_pmp_mmio zip s2_itlb_pbmt).map { case (mmio, pbmt) =>
    mmio || Pbmt.isUncache(pbmt)
  })

  // try re-fetch data from L2 cache if ECC error is detected, unless it's from MSHR
  private val s2_corrupt_refetch = (s2_meta_corrupt zip s2_data_corrupt).map {
    case (meta, data) => meta || data
  }

  /* s2_exception includes itlb pf/gpf/af, pmp af and meta corruption (af), neither of which should be fetched
   * mmio should not be fetched, it will be fetched by IFU mmio fsm
   * also, if previous has exception, latter port should also not be fetched
   */
  private val s2_should_fetch = VecInit((0 until PortNumber).map { i =>
    (!s2_hits(i) || s2_corrupt_refetch(i)) &&
    (if (i == 0) true.B else s2_doubleline) &&
    !ExceptionType.hasException(s2_exception.take(i + 1)) &&
    s2_mmio.take(i + 1).map(!_).reduce(_ && _)
  })

  private val toMSHRArbiter = Module(new Arbiter(new ICacheMissReq, PortNumber))

  // To avoid sending duplicate requests.
  private val s2_has_send = RegInit(VecInit(Seq.fill(PortNumber)(false.B)))
  (0 until PortNumber).foreach { i =>
    when(s1_fire) {
      s2_has_send(i) := false.B
    }.elsewhen(toMSHRArbiter.io.in(i).fire) {
      s2_has_send(i) := true.B
    }
  }

  (0 until PortNumber).foreach { i =>
    toMSHRArbiter.io.in(i).valid         := s2_valid && s2_should_fetch(i) && !s2_has_send(i) && !s2_flush
    toMSHRArbiter.io.in(i).bits.blkPaddr := getBlkAddr(s2_req_paddr(i))
    toMSHRArbiter.io.in(i).bits.vSetIdx  := s2_req_vSetIdx(i)
  }
  toMSHR <> toMSHRArbiter.io.out

  XSPerfAccumulate("to_missUnit_stall", toMSHR.valid && !toMSHR.ready)

  private val s2_fetch_finish = !s2_should_fetch.reduce(_ || _)

  // also raise af if l2 corrupt is detected
  private val s2_l2_exception = VecInit(s2_l2_corrupt.map(ExceptionType.fromTilelink))
  // NOTE: do NOT raise af if meta/data corrupt is detected, they are automatically recovered by re-fetching from L2

  // merge s2 exceptions, itlb has the highest priority, then l2
  private val s2_exception_out = ExceptionType.merge(
    s2_exception, // includes itlb/pmp exception
    s2_l2_exception
  )

  /**
    ******************************************************************************
    * response to IFU
    ******************************************************************************
    */
  toIFU.valid                 := s2_fire
  toIFU.bits.doubleline       := s2_doubleline
  toIFU.bits.data             := s2_datas.asTypeOf(UInt(blockBits.W))
  toIFU.bits.backendException := s2_backendException
  (0 until PortNumber).foreach { i =>
    toIFU.bits.vaddr(i) := s2_req_vaddr(i)
    toIFU.bits.paddr(i) := s2_req_paddr(i)
    // port 1 fields are only meaningful on a doubleline (cross-cacheline) fetch
    val needThisLine = if (i == 0) true.B else s2_doubleline
    toIFU.bits.exception(i) := Mux(needThisLine, s2_exception_out(i), ExceptionType.none)
    toIFU.bits.pmp_mmio(i)  := Mux(needThisLine, s2_pmp_mmio(i), false.B)
    toIFU.bits.itlb_pbmt(i) := Mux(needThisLine, s2_itlb_pbmt(i), Pbmt.pma)
  }
  // valid only for the first gpf
  toIFU.bits.gpaddr            := s2_req_gpaddr
  toIFU.bits.isForVSnonLeafPTE := s2_req_isForVSnonLeafPTE

  s2_flush := io.flush
  s2_ready := (s2_fetch_finish && !io.respStall) || !s2_valid
  s2_fire  := s2_valid && s2_fetch_finish && !io.respStall && !s2_flush

  /**
    ******************************************************************************
    * report Tilelink corrupt error
    ******************************************************************************
    */
  // overrides the default io.errors drive above (Chisel last-connect) one cycle after s2_fire
  (0 until PortNumber).foreach { i =>
    when(RegNext(s2_fire && s2_l2_corrupt(i))) {
      io.errors(i).valid              := true.B
      io.errors(i).bits.report_to_beu := false.B // l2 should have report that to bus error unit, no need to do it again
      io.errors(i).bits.paddr         := RegNext(s2_req_paddr(i))
      io.errors(i).bits.source.tag    := false.B
      io.errors(i).bits.source.data   := false.B
      io.errors(i).bits.source.l2     := true.B
    }
  }

  /**
    ******************************************************************************
    * performance info. TODO: need to simplify the logic
    ******************************************************************************
    */
  io.perfInfo.only_0_hit      := s2_hits(0) && !s2_doubleline
  io.perfInfo.only_0_miss     := !s2_hits(0) && !s2_doubleline
  io.perfInfo.hit_0_hit_1     := s2_hits(0) && s2_hits(1) && s2_doubleline
  io.perfInfo.hit_0_miss_1    := s2_hits(0) && !s2_hits(1) && s2_doubleline
  io.perfInfo.miss_0_hit_1    := !s2_hits(0) && s2_hits(1) && s2_doubleline
  io.perfInfo.miss_0_miss_1   := !s2_hits(0) && !s2_hits(1) && s2_doubleline
  io.perfInfo.hit_0_except_1  := s2_hits(0) && ExceptionType.hasException(s2_exception(1)) && s2_doubleline
  io.perfInfo.miss_0_except_1 := !s2_hits(0) && ExceptionType.hasException(s2_exception(1)) && s2_doubleline
  io.perfInfo.bank_hit(0)     := s2_hits(0)
  io.perfInfo.bank_hit(1)     := s2_hits(1) && s2_doubleline
  io.perfInfo.except_0        := ExceptionType.hasException(s2_exception(0))
  io.perfInfo.hit             := s2_hits(0) && (!s2_doubleline || s2_hits(1))

  /** <PERF> fetch bubble generated by icache miss */
  XSPerfAccumulate("icache_bubble_s2_miss", s2_valid && !s2_fetch_finish)
  XSPerfAccumulate("icache_bubble_s0_wayLookup", s0_valid && !fromWayLookup.ready)

  io.fetch.topdownIcacheMiss := !s2_fetch_finish
  io.fetch.topdownItlbMiss   := s0_valid && !fromWayLookup.ready

  // class ICacheTouchDB(implicit p: Parameters) extends ICacheBundle{
  //   val blkPaddr = UInt((PAddrBits - blockOffBits).W)
  //   val vSetIdx  = UInt(idxBits.W)
  //   val waymask  = UInt(wayBits.W)
  // }

  // private val isWriteICacheTouchTable =
  //   WireInit(Constantin.createRecord("isWriteICacheTouchTable" + p(XSCoreParamsKey).HartId.toString))
  // private val ICacheTouchTable =
  //   ChiselDB.createTable("ICacheTouchTable" + p(XSCoreParamsKey).HartId.toString, new ICacheTouchDB)

  // val ICacheTouchDumpData = Wire(Vec(PortNumber, new ICacheTouchDB))
  // (0 until PortNumber).foreach{ i =>
  //   ICacheTouchDumpData(i).blkPaddr := getBlkAddr(s2_req_paddr(i))
  //   ICacheTouchDumpData(i).vSetIdx  := s2_req_vSetIdx(i)
  //   ICacheTouchDumpData(i).waymask  := OHToUInt(s2_tag_match_vec(i))
  //   ICacheTouchTable.log(
  //     data = ICacheTouchDumpData(i),
  //     en = io.touch(i).valid,
  //     site = "req_" + i.toString,
  //     clock = clock,
  //     reset = reset
  //   )
  // }

  /**
    ******************************************************************************
    * difftest refill check
    ******************************************************************************
    */
  if (env.EnableDifftest) {
    val discards = (0 until PortNumber).map { i =>
      ExceptionType.hasException(toIFU.bits.exception(i)) ||
      toIFU.bits.pmp_mmio(i) ||
      Pbmt.isUncache(toIFU.bits.itlb_pbmt(i))
    }
    val blkPaddrAll = s2_req_paddr.map(addr => (addr(PAddrBits - 1, blockOffBits) << blockOffBits).asUInt)
    (0 until ICacheDataBanks).foreach { i =>
      val diffMainPipeOut = DifftestModule(new DiffRefillEvent, dontCare = true)
      diffMainPipeOut.coreid := io.hartId
      diffMainPipeOut.index  := (3 + i).U

      val bankSel = getBankSel(s2_req_offset, s2_valid).reduce(_ | _)
      val lineSel = getLineSel(s2_req_offset)

      diffMainPipeOut.valid := s2_fire && bankSel(i).asBool && Mux(lineSel(i), !discards(1), !discards(0))
      diffMainPipeOut.addr := Mux(
        lineSel(i),
        blkPaddrAll(1) + (i.U << log2Ceil(blockBytes / ICacheDataBanks)).asUInt,
        blkPaddrAll(0) + (i.U << log2Ceil(blockBytes / ICacheDataBanks)).asUInt
      )

      diffMainPipeOut.data  := s2_datas(i).asTypeOf(diffMainPipeOut.data)
      diffMainPipeOut.idtfr := DontCare
    }
  }
}