/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.frontend.icache

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import freechips.rocketchip.tilelink.ClientStates
import xiangshan._
import xiangshan.cache.mmu._
import utils._
import utility._
import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle}
import xiangshan.frontend.{FtqICacheInfo, FtqToICacheRequestBundle}

/** Fetch request into the ICache main pipeline (virtual address only). */
class ICacheMainPipeReq(implicit p: Parameters) extends ICacheBundle
{
  val vaddr  = UInt(VAddrBits.W)
  def vsetIdx = get_idx(vaddr)
}

/** Response to IFU: cacheline data from two sources plus translation exceptions.
  * When `select` is high the consumer uses `sramData` (cache hit path),
  * otherwise `registerData` (miss/refill path).
  */
class ICacheMainPipeResp(implicit p: Parameters) extends ICacheBundle
{
  val vaddr        = UInt(VAddrBits.W)
  val registerData = UInt(blockBits.W)
  val sramData     = UInt(blockBits.W)
  val select       = Bool()
  val paddr        = UInt(PAddrBits.W)
  val tlbExcp      = new Bundle{
    val pageFault   = Bool()
    val accessFault = Bool()
    val mmio        = Bool()
  }
}

/** FTQ-facing request/response pair of the main pipeline. */
class ICacheMainPipeBundle(implicit p: Parameters) extends ICacheBundle
{
  val req  = Flipped(Decoupled(new FtqToICacheRequestBundle))
  val resp = Vec(PortNumber, ValidIO(new ICacheMainPipeResp))
}

/** Request/response channel to the meta (tag) SRAM array. */
class ICacheMetaReqBundle(implicit p: Parameters) extends ICacheBundle{
  val toIMeta   = DecoupledIO(new ICacheReadBundle)
  val fromIMeta = Input(new ICacheMetaRespBundle)
}

/** Request/response channel to the data SRAM array (one read bundle per part-way). */
class ICacheDataReqBundle(implicit p: Parameters) extends ICacheBundle{
  val toIData   = DecoupledIO(Vec(partWayNum, new ICacheReadBundle))
  val fromIData = Input(new ICacheDataRespBundle)
}

/** Miss-request / refill-response channel to one MSHR entry. */
class ICacheMSHRBundle(implicit p: Parameters) extends ICacheBundle{
  val toMSHR   = Decoupled(new ICacheMissReq)
  val fromMSHR = Flipped(ValidIO(new ICacheMissResp))
}

/** Physical-address check channel to the PMP. */
class ICachePMPBundle(implicit p: Parameters) extends ICacheBundle{
  val req  = Valid(new PMPReqBundle())
  val resp = Input(new PMPRespBundle())
}

/** One-hot hit/miss/exception pattern of the two fetch ports, for perf counters. */
class ICachePerfInfo(implicit p: Parameters) extends ICacheBundle{
  val only_0_hit      = Bool()
  val only_0_miss     = Bool()
  val hit_0_hit_1     = Bool()
  val hit_0_miss_1    = Bool()
  val miss_0_hit_1    = Bool()
  val miss_0_miss_1   = Bool()
  val hit_0_except_1  = Bool()
  val miss_0_except_1 = Bool()
  val except_0        = Bool()
  val bank_hit        = Vec(2, Bool())
  val hit             = Bool()
}

/** Full I/O of the ICache main pipeline. */
class ICacheMainPipeInterface(implicit p: Parameters) extends ICacheBundle {
  /*** internal interface ***/
  val metaArray = new ICacheMetaReqBundle
  val dataArray = new ICacheDataReqBundle
  val mshr      = Vec(PortNumber, new ICacheMSHRBundle)
  val errors    = Output(Vec(PortNumber, new L1CacheErrorInfo))
  /*** outside interface ***/
  //val fetch       = Vec(PortNumber, new ICacheMainPipeBundle)
  /* when ftq.valid is high in T + 1 cycle
   * the ftq component must be valid in T cycle
   */
  val fetch     = new ICacheMainPipeBundle
  val pmp       = Vec(PortNumber, new ICachePMPBundle)
  val itlb      = Vec(PortNumber, new TlbRequestIO)
  val respStall = Input(Bool())
  val perfInfo  = Output(new ICachePerfInfo)

  val prefetchEnable  = Output(Bool())
  val prefetchDisable = Output(Bool())
  val csr_parity_enable = Input(Bool())

}

/** Three-stage ICache access pipeline:
  *   s0: issue requests to ITLB and meta/data SRAM
  *   s1: collect TLB/SRAM responses, tag-compare, choose victim
  *   s2: send miss requests to MSHR, track secondary misses, respond to IFU
  */
class ICacheMainPipe(implicit p: Parameters) extends ICacheModule
{
  val io = IO(new ICacheMainPipeInterface)

  /** Input/Output port */
  val (fromFtq, toIFU)    = (io.fetch.req, io.fetch.resp)
  val (toMeta, metaResp)  = (io.metaArray.toIMeta, io.metaArray.fromIMeta)
  val (toData, dataResp)  = (io.dataArray.toIData, io.dataArray.fromIData)
  val (toMSHR, fromMSHR)  = (io.mshr.map(_.toMSHR), io.mshr.map(_.fromMSHR))
  val (toITLB, fromITLB)  = (io.itlb.map(_.req), io.itlb.map(_.resp))
  val (toPMP, fromPMP)    = (io.pmp.map(_.req), io.pmp.map(_.resp))
  io.itlb.foreach(_.req_kill := false.B)

  //Ftq RegNext Register
  val fromFtqReq = fromFtq.bits.pcMemRead

  /** pipeline control signal */
  val s1_ready, s2_ready           = Wire(Bool())
  val s0_fire, s1_fire, s2_fire    = Wire(Bool())

  // set while a miss is outstanding: blocks new s0 requests from entering
  val missSwitchBit = RegInit(false.B)

  /** replacement status register */
  val touch_sets = Seq.fill(2)(Wire(Vec(2, UInt(log2Ceil(nSets/2).W))))
  val touch_ways = Seq.fill(2)(Wire(Vec(2, Valid(UInt(log2Ceil(nWays).W)))))

  /**
    ******************************************************************************
    * ICache Stage 0
    * - send req to ITLB and wait for tlb miss fixing
    * - send req to Meta/Data SRAM
    ******************************************************************************
    */

  /** s0 control */
  val s0_valid       = fromFtq.valid
  val s0_req_vaddr   = (0 until partWayNum + 1).map(i => VecInit(Seq(fromFtqReq(i).startAddr, fromFtqReq(i).nextlineStart)))
  val s0_req_vsetIdx = (0 until partWayNum + 1).map(i => VecInit(s0_req_vaddr(i).map(get_idx(_))))
  val s0_only_first  = (0 until partWayNum + 1).map(i => fromFtq.bits.readValid(i) && !fromFtqReq(i).crossCacheline)
  val s0_double_line = (0 until partWayNum + 1).map(i => fromFtq.bits.readValid(i) &&  fromFtqReq(i).crossCacheline)

  val s0_final_valid       = s0_valid
  val s0_final_vaddr       = s0_req_vaddr.head
  val s0_final_vsetIdx     = s0_req_vsetIdx.head
  val s0_final_only_first  = s0_only_first.head
  val s0_final_double_line = s0_double_line.head

  /** SRAM request */
  //0 -> metaread, 1,2,3 -> data, 3 -> code 4 -> itlb
  // TODO: it seems like 0,1,2,3 -> dataArray(data); 3 -> dataArray(code); 0 -> metaArray; 4 -> itlb
  val ftq_req_to_data_doubleline = s0_double_line.init
  val ftq_req_to_data_vset_idx   = s0_req_vsetIdx.init
  val ftq_req_to_data_valid      = fromFtq.bits.readValid.init

  val ftq_req_to_meta_doubleline = s0_double_line.head
  val ftq_req_to_meta_vset_idx   = s0_req_vsetIdx.head

  val ftq_req_to_itlb_only_first = s0_only_first.last
  val ftq_req_to_itlb_doubleline = s0_double_line.last
  val ftq_req_to_itlb_vaddr      = s0_req_vaddr.last
  val ftq_req_to_itlb_vset_idx   = s0_req_vsetIdx.last


  for(i <- 0 until partWayNum) {
    // NOTE(review): `toData.valid` is assigned on every loop iteration; Chisel
    // last-connect semantics mean only i == partWayNum-1 takes effect. Confirm
    // that gating valid on the last read port alone is the intended behavior.
    toData.valid                  := ftq_req_to_data_valid(i) && !missSwitchBit
    toData.bits(i).isDoubleLine   := ftq_req_to_data_doubleline(i)
    toData.bits(i).vSetIdx        := ftq_req_to_data_vset_idx(i)
  }

  toMeta.valid             := s0_valid && !missSwitchBit
  toMeta.bits.isDoubleLine := ftq_req_to_meta_doubleline
  toMeta.bits.vSetIdx      := ftq_req_to_meta_vset_idx


  toITLB(0).valid         := s0_valid
  toITLB(0).bits.size     := 3.U // TODO: fix the size
  toITLB(0).bits.vaddr    := ftq_req_to_itlb_vaddr(0)
  toITLB(0).bits.debug.pc := ftq_req_to_itlb_vaddr(0)

  // port 1 is only used for the second cacheline of a cross-line fetch
  toITLB(1).valid         := s0_valid && ftq_req_to_itlb_doubleline
  toITLB(1).bits.size     := 3.U // TODO: fix the size
  toITLB(1).bits.vaddr    := ftq_req_to_itlb_vaddr(1)
  toITLB(1).bits.debug.pc := ftq_req_to_itlb_vaddr(1)

  toITLB.map{port =>
    port.bits.cmd                 := TlbCmd.exec
    port.bits.memidx              := DontCare
    port.bits.debug.robIdx        := DontCare
    port.bits.no_translate        := false.B
    port.bits.debug.isFirstIssue  := DontCare
  }

  /** ITLB & ICACHE sync case
    * when icache is not ready, but itlb is ready
    * because itlb is non-block, then the req will take the port
    * then itlb will unset the ready?? itlb is wrongly blocked.
    * Solution: maybe give itlb a signal to tell whether acquire the slot?
    */

  val itlb_can_go    = toITLB(0).ready && toITLB(1).ready
  val icache_can_go  = toData.ready && toMeta.ready
  val pipe_can_go    = !missSwitchBit && s1_ready
  val s0_can_go      = itlb_can_go && icache_can_go && pipe_can_go
  val s0_fetch_fire  = s0_valid && s0_can_go
  s0_fire := s0_fetch_fire
  // kill the in-flight itlb request if the rest of s0 cannot accept it this cycle
  toITLB.map{port => port.bits.kill := !icache_can_go || !pipe_can_go}

  //TODO: fix GTimer() condition
  fromFtq.ready := s0_can_go

  /**
    ******************************************************************************
    * ICache Stage 1
    * - get tlb resp data (exceptiong info and physical addresses)
    * - get Meta/Data SRAM read responses (latched for pipeline stop)
    * - tag compare/hit check
    ******************************************************************************
    */

  /** s1 control */

  val s1_valid = generatePipeControl(lastFire = s0_fire, thisFire = s1_fire, thisFlush = false.B, lastFlush = false.B)

  val s1_req_vaddr   = RegEnable(s0_final_vaddr, s0_fire)
  val s1_req_vsetIdx = RegEnable(s0_final_vsetIdx, s0_fire)
  val s1_only_first  = RegEnable(s0_final_only_first, s0_fire)
  val s1_double_line = RegEnable(s0_final_double_line, s0_fire)

  /** tlb response latch for pipeline stop */
  // track, per port, whether a tlb response is still owed and whether it has
  // already been captured while the pipeline was stalled
  val tlb_back         = fromITLB.map(_.fire())
  val tlb_need_back    = VecInit((0 until PortNumber).map(i => ValidHold(s0_fire && toITLB(i).fire(), s1_fire, false.B)))
  val tlb_already_recv = RegInit(VecInit(Seq.fill(PortNumber)(false.B)))
  val tlb_ready_recv   = VecInit((0 until PortNumber).map(i => RegNext(s0_fire, false.B) || (s1_valid && !tlb_already_recv(i))))
  val tlb_resp_valid   = Wire(Vec(2, Bool()))
  for (i <- 0 until PortNumber) {
    tlb_resp_valid(i) := tlb_already_recv(i) || (tlb_ready_recv(i) && tlb_back(i))
    when (tlb_already_recv(i) && s1_fire) {
      tlb_already_recv(i) := false.B
    }
    when (tlb_back(i) && tlb_ready_recv(i) && !s1_fire) {
      tlb_already_recv(i) := true.B
    }
    fromITLB(i).ready := tlb_ready_recv(i)
  }
  assert(RegNext(Cat((0 until PortNumber).map(i => tlb_need_back(i) || !tlb_resp_valid(i))).andR(), true.B),
    "when tlb should not back, tlb should not resp valid")
  assert(RegNext(!s1_valid || Cat(tlb_need_back).orR, true.B), "when s1_valid, need at least one tlb_need_back")
  assert(RegNext(s1_valid || !Cat(tlb_need_back).orR, true.B), "when !s1_valid, all the tlb_need_back should be false")
  assert(RegNext(s1_valid || !Cat(tlb_already_recv).orR, true.B), "when !s1_valid, should not tlb_already_recv")
  assert(RegNext(s1_valid || !Cat(tlb_resp_valid).orR, true.B), "when !s1_valid, should not tlb_resp_valid")

  val tlbRespPAddr = VecInit((0 until PortNumber).map(i => ResultHoldBypass(valid = tlb_back(i), data = fromITLB(i).bits.paddr(0))))
  val tlbExcpPF    = VecInit((0 until PortNumber).map(i => ResultHoldBypass(valid = tlb_back(i), data = fromITLB(i).bits.excp(0).pf.instr) && tlb_need_back(i)))
  val tlbExcpAF    = VecInit((0 until PortNumber).map(i => ResultHoldBypass(valid = tlb_back(i), data = fromITLB(i).bits.excp(0).af.instr) && tlb_need_back(i)))
  // FIX: original expression was `tlbExcpPF(i) || tlbExcpPF(i)` (duplicated
  // operand), which dropped access faults from the combined exception vector
  // used to gate hit/miss below.
  val tlbExcp      = VecInit((0 until PortNumber).map(i => tlbExcpPF(i) || tlbExcpAF(i)))

  val tlbRespAllValid = Cat((0 until PortNumber).map(i => !tlb_need_back(i) || tlb_resp_valid(i))).andR
  s1_ready := s2_ready && tlbRespAllValid || !s1_valid
  s1_fire  := s1_valid && tlbRespAllValid && s2_ready

  /** s1 hit check/tag compare */
  val s1_req_paddr = tlbRespPAddr
  val s1_req_ptags = VecInit(s1_req_paddr.map(get_phy_tag(_)))

  val s1_meta_ptags  = ResultHoldBypass(data = metaResp.tags, valid = RegNext(s0_fire))
//  val s1_meta_cohs   = ResultHoldBypass(data = metaResp.cohs, valid = RegNext(s0_fire))
  val s1_meta_valids = ResultHoldBypass(data = metaResp.entryValid, valid = RegNext(s0_fire))
  val s1_meta_errors = ResultHoldBypass(data = metaResp.errors, valid = RegNext(s0_fire))

  val s1_data_cacheline = ResultHoldBypass(data = dataResp.datas, valid = RegNext(s0_fire))
  val s1_data_errorBits = ResultHoldBypass(data = dataResp.codes, valid = RegNext(s0_fire))

  val s1_tag_eq_vec    = VecInit((0 until PortNumber).map( p => VecInit((0 until nWays).map( w => s1_meta_ptags(p)(w) === s1_req_ptags(p) ))))
  val s1_tag_match_vec = VecInit((0 until PortNumber).map( k => VecInit(s1_tag_eq_vec(k).zipWithIndex.map{ case(way_tag_eq, w) => way_tag_eq && s1_meta_valids(k)(w) /*s1_meta_cohs(k)(w).isValid()*/})))
  val s1_tag_match     = VecInit(s1_tag_match_vec.map(vector => ParallelOR(vector)))

  val s1_port_hit  = VecInit(Seq( s1_tag_match(0) && s1_valid && !tlbExcp(0),  s1_tag_match(1) && s1_valid && s1_double_line && !tlbExcp(1) ))
  val s1_bank_miss = VecInit(Seq(!s1_tag_match(0) && s1_valid && !tlbExcp(0), !s1_tag_match(1) && s1_valid && s1_double_line && !tlbExcp(1) ))
  val s1_hit       = (s1_port_hit(0) && s1_port_hit(1)) || (!s1_double_line && s1_port_hit(0))

  /** choose victim cacheline */
  val replacers    = Seq.fill(PortNumber)(ReplacementPolicy.fromString(cacheParams.replacer, nWays, nSets/PortNumber))
  val s1_victim_oh = ResultHoldBypass(data = VecInit(replacers.zipWithIndex.map{case (replacer, i) => UIntToOH(replacer.way(s1_req_vsetIdx(i)))}), valid = RegNext(s0_fire))


  when(s1_fire){
    assert(PopCount(s1_tag_match_vec(0)) <= 1.U && (PopCount(s1_tag_match_vec(1)) <= 1.U || !s1_double_line),
      "Multiple hit in main pipe, port0:is=%d,ptag=0x%x,vidx=0x%x,vaddr=0x%x port1:is=%d,ptag=0x%x,vidx=0x%x,vaddr=0x%x ",
      PopCount(s1_tag_match_vec(0)) > 1.U, s1_req_ptags(0), get_idx(s1_req_vaddr(0)), s1_req_vaddr(0),
      PopCount(s1_tag_match_vec(1)) > 1.U && s1_double_line, s1_req_ptags(1), get_idx(s1_req_vaddr(1)), s1_req_vaddr(1))
  }

  ((replacers zip touch_sets) zip touch_ways).map{case ((r, s), w) => r.access(s, w)}


  /** <PERF> replace victim way number */

  (0 until nWays).map{ w =>
    XSPerfAccumulate("line_0_hit_way_" + Integer.toString(w, 10),  s1_fire && s1_port_hit(0) && OHToUInt(s1_tag_match_vec(0)) === w.U)
  }

  (0 until nWays).map{ w =>
    XSPerfAccumulate("line_0_victim_way_" + Integer.toString(w, 10),  s1_fire && !s1_port_hit(0) && OHToUInt(s1_victim_oh(0)) === w.U)
  }

  (0 until nWays).map{ w =>
    XSPerfAccumulate("line_1_hit_way_" + Integer.toString(w, 10),  s1_fire && s1_double_line && s1_port_hit(1) && OHToUInt(s1_tag_match_vec(1)) === w.U)
  }

  (0 until nWays).map{ w =>
    XSPerfAccumulate("line_1_victim_way_" + Integer.toString(w, 10),  s1_fire && s1_double_line && !s1_port_hit(1) && OHToUInt(s1_victim_oh(1)) === w.U)
  }

  /**
    ******************************************************************************
    * ICache Stage 2
    * - send request to MSHR if ICache miss
    * - generate secondary miss status/data registers
    * - response to IFU
    ******************************************************************************
    */

  /** s2 control */
  val s2_fetch_finish = Wire(Bool())

  val s2_valid          = generatePipeControl(lastFire = s1_fire, thisFire = s2_fire, thisFlush = false.B, lastFlush = false.B)
  val s2_miss_available = Wire(Bool())

  s2_ready := (s2_valid && s2_fetch_finish && !io.respStall) || (!s2_valid && s2_miss_available)
  s2_fire  := s2_valid && s2_fetch_finish && !io.respStall

  /** s2 data */
  val mmio = fromPMP.map(port => port.mmio) // TODO: handle it

  val (s2_req_paddr, s2_req_vaddr) = (RegEnable(s1_req_paddr, s1_fire), RegEnable(s1_req_vaddr, s1_fire))
  val s2_req_vsetIdx   = RegEnable(s1_req_vsetIdx, s1_fire)
  val s2_req_ptags     = RegEnable(s1_req_ptags, s1_fire)
  val s2_only_first    = RegEnable(s1_only_first, s1_fire)
  val s2_double_line   = RegEnable(s1_double_line, s1_fire)
  val s2_hit           = RegEnable(s1_hit, s1_fire)
  val s2_port_hit      = RegEnable(s1_port_hit, s1_fire)
  val s2_bank_miss     = RegEnable(s1_bank_miss, s1_fire)
  val s2_waymask       = RegEnable(s1_victim_oh, s1_fire)
//  val s2_victim_coh    = RegEnable(s1_victim_coh, s1_fire)
  val s2_tag_match_vec = RegEnable(s1_tag_match_vec, s1_fire)

  // virtual and physical addresses must agree on the page offset
  assert(RegNext(!s2_valid || s2_req_paddr(0)(11,0) === s2_req_vaddr(0)(11,0), true.B))

  /** status imply that s2 is a secondary miss (no need to resend miss request) */
  val sec_meet_vec      = Wire(Vec(2, Bool()))
  val s2_fixed_hit_vec  = VecInit((0 until 2).map(i => s2_port_hit(i) || sec_meet_vec(i)))
  val s2_fixed_hit      = (s2_valid && s2_fixed_hit_vec(0) && s2_fixed_hit_vec(1) && s2_double_line) || (s2_valid && s2_fixed_hit_vec(0) && !s2_double_line)

  val s2_meta_errors    = RegEnable(s1_meta_errors, s1_fire)
  val s2_data_errorBits = RegEnable(s1_data_errorBits, s1_fire)
  val s2_data_cacheline = RegEnable(s1_data_cacheline, s1_fire)

  val s2_data_errors    = Wire(Vec(PortNumber, Vec(nWays, Bool())))

  // ECC/parity decode of the data array response; port 1 errors only matter
  // for cross-line fetches
  (0 until PortNumber).map{ i =>
    val read_datas = s2_data_cacheline(i).asTypeOf(Vec(nWays, Vec(dataCodeUnitNum, UInt(dataCodeUnit.W))))
    val read_codes = s2_data_errorBits(i).asTypeOf(Vec(nWays, Vec(dataCodeUnitNum, UInt(dataCodeBits.W))))
    val data_full_wayBits = VecInit((0 until nWays).map( w =>
      VecInit((0 until dataCodeUnitNum).map(u =>
        Cat(read_codes(w)(u), read_datas(w)(u))))))
    val data_error_wayBits = VecInit((0 until nWays).map( w =>
      VecInit((0 until dataCodeUnitNum).map(u =>
        cacheParams.dataCode.decode(data_full_wayBits(w)(u)).error ))))
    if(i == 0){
      (0 until nWays).map{ w =>
        s2_data_errors(i)(w) := RegNext(RegNext(s1_fire)) && RegNext(data_error_wayBits(w)).reduce(_||_)
      }
    } else {
      (0 until nWays).map{ w =>
        s2_data_errors(i)(w) := RegNext(RegNext(s1_fire)) && RegNext(RegNext(s1_double_line)) && RegNext(data_error_wayBits(w)).reduce(_||_)
      }
    }
  }

  val s2_parity_meta_error = VecInit((0 until PortNumber).map(i => s2_meta_errors(i).reduce(_||_) && io.csr_parity_enable))
  val s2_parity_data_error = VecInit((0 until PortNumber).map(i => s2_data_errors(i).reduce(_||_) && io.csr_parity_enable))
  val s2_parity_error      = VecInit((0 until PortNumber).map(i => RegNext(s2_parity_meta_error(i)) || s2_parity_data_error(i)))

  for(i <- 0 until PortNumber){
    io.errors(i).valid            := RegNext(s2_parity_error(i) && RegNext(RegNext(s1_fire)))
    io.errors(i).report_to_beu    := RegNext(s2_parity_error(i) && RegNext(RegNext(s1_fire)))
    io.errors(i).paddr            := RegNext(RegNext(s2_req_paddr(i)))
    io.errors(i).source           := DontCare
    io.errors(i).source.tag       := RegNext(RegNext(s2_parity_meta_error(i)))
    io.errors(i).source.data      := RegNext(s2_parity_data_error(i))
    io.errors(i).source.l2        := false.B
    io.errors(i).opType           := DontCare
    io.errors(i).opType.fetch     := true.B
  }
  // FIX: error message typo corrected ("MainPaipe" -> "MainPipe")
  XSError(s2_parity_error.reduce(_||_) && RegNext(RegNext(s1_fire)), "ICache has parity error in MainPipe!")


  /** exception and pmp logic **/
  //PMP Result
  val s2_tlb_need_back = VecInit((0 until PortNumber).map(i => ValidHold(tlb_need_back(i) && s1_fire, s2_fire, false.B)))
  val pmpExcpAF = Wire(Vec(PortNumber, Bool()))
  pmpExcpAF(0) := fromPMP(0).instr && s2_tlb_need_back(0)
  pmpExcpAF(1) := fromPMP(1).instr && s2_double_line && s2_tlb_need_back(1)
  //exception information
  //short delay exception signal
  val s2_except_pf     = RegEnable(tlbExcpPF, s1_fire)
  val s2_except_tlb_af = RegEnable(tlbExcpAF, s1_fire)
  //long delay exception signal
  val s2_except_pmp_af = DataHoldBypass(pmpExcpAF, RegNext(s1_fire))
  // val s2_except_parity_af = VecInit(s2_parity_error(i) && RegNext(RegNext(s1_fire)) )

  val s2_except     = VecInit((0 until 2).map{i => s2_except_pf(i) || s2_except_tlb_af(i)})
  val s2_has_except = s2_valid && (s2_except_tlb_af.reduce(_||_) || s2_except_pf.reduce(_||_))
  //MMIO
  val s2_mmio = DataHoldBypass(io.pmp(0).resp.mmio && !s2_except_tlb_af(0) && !s2_except_pmp_af(0) && !s2_except_pf(0), RegNext(s1_fire)).asBool() && s2_valid

  //send physical address to PMP
  io.pmp.zipWithIndex.map { case (p, i) =>
    p.req.valid     := s2_valid && !missSwitchBit
    p.req.bits.addr := s2_req_paddr(i)
    p.req.bits.size := 3.U // TODO
    p.req.bits.cmd  := TlbCmd.exec
  }

  /*** cacheline miss logic ***/
  val wait_idle :: wait_queue_ready :: wait_send_req :: wait_two_resp :: wait_0_resp :: wait_1_resp :: wait_one_resp :: wait_finish :: wait_pmp_except :: Nil = Enum(9)
  val wait_state = RegInit(wait_idle)

  val port_miss_fix = VecInit(Seq(fromMSHR(0).fire() && !s2_port_hit(0), fromMSHR(1).fire() && s2_double_line && !s2_port_hit(1) ))

  // secondary miss record registers
  class MissSlot(implicit p: Parameters) extends ICacheBundle {
    val m_vSetIdx = UInt(idxBits.W)
    val m_pTag    = UInt(tagBits.W)
    val m_data    = UInt(blockBits.W)
    val m_corrupt = Bool()
  }

  val missSlot           = Seq.fill(2)(RegInit(0.U.asTypeOf(new MissSlot)))
  val m_invalid :: m_valid :: m_refilled :: m_flushed :: m_wait_sec_miss :: m_check_final :: Nil = Enum(6)
  val missStateQueue     = RegInit(VecInit(Seq.fill(2)(m_invalid)))
  val reservedRefillData = Wire(Vec(2, UInt(blockBits.W)))

  s2_miss_available := VecInit(missStateQueue.map(entry => entry === m_invalid || entry === m_wait_sec_miss)).reduce(_&&_)

  // fix_sec_miss(0/1): slot 0 fixes s2 port 0/1; fix_sec_miss(2/3): slot 1 fixes s2 port 0/1
  val fix_sec_miss     = Wire(Vec(4, Bool()))
  val sec_meet_0_miss  = fix_sec_miss(0) || fix_sec_miss(2)
  val sec_meet_1_miss  = fix_sec_miss(1) || fix_sec_miss(3)
  sec_meet_vec := VecInit(Seq(sec_meet_0_miss, sec_meet_1_miss))

  /*** miss/hit pattern: <Control Signal> only raise at the first cycle of s2_valid ***/
  val cacheline_0_hit  = (s2_port_hit(0) || sec_meet_0_miss)
  val cacheline_0_miss = !s2_port_hit(0) && !sec_meet_0_miss

  val cacheline_1_hit  = (s2_port_hit(1) || sec_meet_1_miss)
  val cacheline_1_miss = !s2_port_hit(1) && !sec_meet_1_miss

  val only_0_miss   = RegNext(s1_fire) && cacheline_0_miss && !s2_double_line && !s2_has_except && !s2_mmio
  val only_0_hit    = RegNext(s1_fire) && cacheline_0_hit && !s2_double_line && !s2_mmio
  val hit_0_hit_1   = RegNext(s1_fire) && cacheline_0_hit && cacheline_1_hit && s2_double_line && !s2_mmio
  val hit_0_miss_1  = RegNext(s1_fire) && cacheline_0_hit && cacheline_1_miss && s2_double_line && !s2_has_except && !s2_mmio
  val miss_0_hit_1  = RegNext(s1_fire) && cacheline_0_miss && cacheline_1_hit && s2_double_line && !s2_has_except && !s2_mmio
  val miss_0_miss_1 = RegNext(s1_fire) && cacheline_0_miss && cacheline_1_miss && s2_double_line && !s2_has_except && !s2_mmio

  val hit_0_except_1  = RegNext(s1_fire) && s2_double_line && !s2_except(0) && s2_except(1) && cacheline_0_hit
  val miss_0_except_1 = RegNext(s1_fire) && s2_double_line && !s2_except(0) && s2_except(1) && cacheline_0_miss
  val except_0        = RegNext(s1_fire) && s2_except(0)

  /** Latch `valid` high until `release` fires (or `flush`); output also bypasses
    * the current-cycle `valid`. Used to keep single-cycle patterns alive while
    * the pipeline is stalled. */
  def holdReleaseLatch(valid: Bool, release: Bool, flush: Bool): Bool = {
    val bit = RegInit(false.B)
    when(flush)                   { bit := false.B }
    .elsewhen(valid && !release)  { bit := true.B  }
    .elsewhen(release)            { bit := false.B }
    bit || valid
  }

  /*** miss/hit pattern latch: <Control Signal> latch the miss/hit patter if pipeline stop ***/
  val miss_0_hit_1_latch  = holdReleaseLatch(valid = miss_0_hit_1, release = s2_fire, flush = false.B)
  val miss_0_miss_1_latch = holdReleaseLatch(valid = miss_0_miss_1, release = s2_fire, flush = false.B)
  val only_0_miss_latch   = holdReleaseLatch(valid = only_0_miss, release = s2_fire, flush = false.B)
  val hit_0_miss_1_latch  = holdReleaseLatch(valid = hit_0_miss_1, release = s2_fire, flush = false.B)

  val miss_0_except_1_latch = holdReleaseLatch(valid = miss_0_except_1, release = s2_fire, flush = false.B)
  val except_0_latch        = holdReleaseLatch(valid = except_0, release = s2_fire, flush = false.B)
  val hit_0_except_1_latch  = holdReleaseLatch(valid = hit_0_except_1, release = s2_fire, flush = false.B)

  val only_0_hit_latch  = holdReleaseLatch(valid = only_0_hit, release = s2_fire, flush = false.B)
  val hit_0_hit_1_latch = holdReleaseLatch(valid = hit_0_hit_1, release = s2_fire, flush = false.B)


  /*** secondary miss judgment ***/

  def waitSecondComeIn(missState: UInt): Bool = (missState === m_wait_sec_miss)

  // does miss slot `slotNum` already hold the line that s2 port `missNum` misses on?
  def getMissSituat(slotNum: Int, missNum: Int): Bool = {
    RegNext(s1_fire) &&
    RegNext(missSlot(slotNum).m_vSetIdx === s1_req_vsetIdx(missNum)) &&
    RegNext(missSlot(slotNum).m_pTag === s1_req_ptags(missNum)) &&
    !s2_port_hit(missNum) &&
    waitSecondComeIn(missStateQueue(slotNum))
  }

  val miss_0_s2_0 = getMissSituat(slotNum = 0, missNum = 0)
  val miss_0_s2_1 = getMissSituat(slotNum = 0, missNum = 1)
  val miss_1_s2_0 = getMissSituat(slotNum = 1, missNum = 0)
  val miss_1_s2_1 = getMissSituat(slotNum = 1, missNum = 1)

  val miss_0_s2_0_latch = holdReleaseLatch(valid = miss_0_s2_0, release = s2_fire, flush = false.B)
  val miss_0_s2_1_latch = holdReleaseLatch(valid = miss_0_s2_1, release = s2_fire, flush = false.B)
  val miss_1_s2_0_latch = holdReleaseLatch(valid = miss_1_s2_0, release = s2_fire, flush = false.B)
  val miss_1_s2_1_latch = holdReleaseLatch(valid = miss_1_s2_1, release = s2_fire, flush = false.B)


  // renamed from `slot_slove` for spelling consistency (local signals only)
  val slot_0_solve = fix_sec_miss(0) || fix_sec_miss(1)
  val slot_1_solve = fix_sec_miss(2) || fix_sec_miss(3)
  val slot_solve   = VecInit(Seq(slot_0_solve, slot_1_solve))

  fix_sec_miss := VecInit(Seq(miss_0_s2_0_latch, miss_0_s2_1_latch, miss_1_s2_0_latch, miss_1_s2_1_latch))

  /*** reserved data for secondary miss ***/

  reservedRefillData(0) := DataHoldBypass(data = missSlot(0).m_data, valid = miss_0_s2_0 || miss_0_s2_1)
  reservedRefillData(1) := DataHoldBypass(data = missSlot(1).m_data, valid = miss_1_s2_0 || miss_1_s2_1)

  /*** miss state machine ***/

  //deal with not-cache-hit pmp af
  val only_pmp_af = Wire(Vec(2, Bool()))
  only_pmp_af(0) := s2_except_pmp_af(0) && cacheline_0_miss && !s2_except(0) && s2_valid
  only_pmp_af(1) := s2_except_pmp_af(1) && cacheline_1_miss && !s2_except(1) && s2_valid && s2_double_line

  switch(wait_state){
    is(wait_idle){
      when(only_pmp_af(0) || only_pmp_af(1) || s2_mmio){
        //should not send req to MissUnit when there is an access exception in PMP
        //But to avoid using pmp exception in control signal (like s2_fire), should delay 1 cycle.
        //NOTE: pmp exception cache line also could hit in ICache, but the result is meaningless. Just give the exception signals.
        wait_state := wait_finish
      }.elsewhen(miss_0_except_1_latch){
        wait_state := Mux(toMSHR(0).ready, wait_queue_ready, wait_idle)
      }.elsewhen( only_0_miss_latch || miss_0_hit_1_latch){
        wait_state := Mux(toMSHR(0).ready, wait_queue_ready, wait_idle)
      }.elsewhen(hit_0_miss_1_latch){
        wait_state := Mux(toMSHR(1).ready, wait_queue_ready, wait_idle)
      }.elsewhen( miss_0_miss_1_latch ){
        wait_state := Mux(toMSHR(0).ready && toMSHR(1).ready, wait_queue_ready, wait_idle)
      }
    }

    is(wait_queue_ready){
      wait_state := wait_send_req
    }

    is(wait_send_req) {
      when(miss_0_except_1_latch || only_0_miss_latch || hit_0_miss_1_latch || miss_0_hit_1_latch){
        wait_state := wait_one_resp
      }.elsewhen( miss_0_miss_1_latch ){
        wait_state := wait_two_resp
      }
    }

    is(wait_one_resp) {
      when( (miss_0_except_1_latch || only_0_miss_latch || miss_0_hit_1_latch) && fromMSHR(0).fire()){
        wait_state := wait_finish
      }.elsewhen( hit_0_miss_1_latch && fromMSHR(1).fire()){
        wait_state := wait_finish
      }
    }

    is(wait_two_resp) {
      when(fromMSHR(0).fire() && fromMSHR(1).fire()){
        wait_state := wait_finish
      }.elsewhen( !fromMSHR(0).fire() && fromMSHR(1).fire() ){
        wait_state := wait_0_resp
      }.elsewhen(fromMSHR(0).fire() && !fromMSHR(1).fire()){
        wait_state := wait_1_resp
      }
    }

    is(wait_0_resp) {
      when(fromMSHR(0).fire()){
        wait_state := wait_finish
      }
    }

    is(wait_1_resp) {
      when(fromMSHR(1).fire()){
        wait_state := wait_finish
      }
    }

    is(wait_finish) {
      when(s2_fire) { wait_state := wait_idle }
    }
  }


  /*** send request to MissUnit ***/

  (0 until 2).map { i =>
    if(i == 1) toMSHR(i).valid := (hit_0_miss_1_latch || miss_0_miss_1_latch) && wait_state === wait_queue_ready && !s2_mmio
    else       toMSHR(i).valid := (only_0_miss_latch || miss_0_hit_1_latch || miss_0_miss_1_latch || miss_0_except_1_latch) && wait_state === wait_queue_ready && !s2_mmio
    toMSHR(i).bits.paddr   := s2_req_paddr(i)
    toMSHR(i).bits.vaddr   := s2_req_vaddr(i)
    toMSHR(i).bits.waymask := s2_waymask(i)


    when(toMSHR(i).fire() && missStateQueue(i) === m_invalid){
      missStateQueue(i)     := m_valid
      missSlot(i).m_vSetIdx := s2_req_vsetIdx(i)
      missSlot(i).m_pTag    := get_phy_tag(s2_req_paddr(i))
    }

    when(fromMSHR(i).fire() && missStateQueue(i) === m_valid ){
      missStateQueue(i)     := m_refilled
      missSlot(i).m_data    := fromMSHR(i).bits.data
      missSlot(i).m_corrupt := fromMSHR(i).bits.corrupt
    }


    when(s2_fire && missStateQueue(i) === m_refilled){
      missStateQueue(i)     := m_wait_sec_miss
    }

    /*** Only the first cycle to check whether meet the secondary miss ***/
    when(missStateQueue(i) === m_wait_sec_miss){
      /*** The seondary req has been fix by this slot and another also hit || the secondary req for other cacheline and hit ***/
      when((slot_solve(i) && s2_fire) || (!slot_solve(i) && s2_fire) ) {
        missStateQueue(i)     := m_invalid
      }
      /*** The seondary req has been fix by this slot but another miss/f3 not ready || the seondary req for other cacheline and miss ***/
      .elsewhen((slot_solve(i) && !s2_fire && s2_valid) || (s2_valid && !slot_solve(i) && !s2_fire) ){
        missStateQueue(i)     := m_check_final
      }
    }

    when(missStateQueue(i) === m_check_final && toMSHR(i).fire()){
      missStateQueue(i)     := m_valid
      missSlot(i).m_vSetIdx := s2_req_vsetIdx(i)
      missSlot(i).m_pTag    := get_phy_tag(s2_req_paddr(i))
    }.elsewhen(missStateQueue(i) === m_check_final) {
      missStateQueue(i)     := m_invalid
    }
  }

  io.prefetchEnable  := false.B
  io.prefetchDisable := false.B
  when(toMSHR.map(_.valid).reduce(_||_)){
    missSwitchBit      := true.B
    io.prefetchEnable  := true.B
  }.elsewhen(missSwitchBit && s2_fetch_finish){
    missSwitchBit      := false.B
    io.prefetchDisable := true.B
  }


  val miss_all_fix = wait_state === wait_finish

  s2_fetch_finish := ((s2_valid && s2_fixed_hit) || miss_all_fix || hit_0_except_1_latch || except_0_latch)

  /** update replacement status register: 0 is hit access/ 1 is miss access */
  (touch_ways zip touch_sets).zipWithIndex.map{ case((t_w, t_s), i) =>
    t_s(0)       := s2_req_vsetIdx(i)
    t_w(0).valid := s2_valid && s2_port_hit(i)
    t_w(0).bits  := OHToUInt(s2_tag_match_vec(i))

    t_s(1)       := s2_req_vsetIdx(i)
    t_w(1).valid := s2_valid && !s2_port_hit(i)
    t_w(1).bits  := OHToUInt(s2_waymask(i))
  }

  //** use hit one-hot select data
  val s2_hit_datas = VecInit(s2_data_cacheline.zipWithIndex.map { case(bank, i) =>
    val port_hit_data = Mux1H(s2_tag_match_vec(i).asUInt, bank)
    port_hit_data
  })

  val s2_register_datas = Wire(Vec(2, UInt(blockBits.W)))

  s2_register_datas.zipWithIndex.map{case(bank, i) =>
    // if(i == 0) bank := Mux(s2_port_hit(i), s2_hit_datas(i), Mux(miss_0_s2_0_latch,reservedRefillData(0), Mux(miss_1_s2_0_latch,reservedRefillData(1), missSlot(0).m_data)))
    // else    bank := Mux(s2_port_hit(i), s2_hit_datas(i), Mux(miss_0_s2_1_latch,reservedRefillData(0), Mux(miss_1_s2_1_latch,reservedRefillData(1), missSlot(1).m_data)))
    if(i == 0) bank := Mux(miss_0_s2_0_latch, reservedRefillData(0), Mux(miss_1_s2_0_latch, reservedRefillData(1), missSlot(0).m_data))
    else       bank := Mux(miss_0_s2_1_latch, reservedRefillData(0), Mux(miss_1_s2_1_latch, reservedRefillData(1), missSlot(1).m_data))
  }

  /** response to IFU */

  (0 until PortNumber).map{ i =>
    if(i == 0) toIFU(i).valid := s2_fire
    else       toIFU(i).valid := s2_fire && s2_double_line
    //when select is high, use sramData. Otherwise, use registerData.
    toIFU(i).bits.registerData := s2_register_datas(i)
    toIFU(i).bits.sramData     := s2_hit_datas(i)
    toIFU(i).bits.select       := s2_port_hit(i)
    toIFU(i).bits.paddr        := s2_req_paddr(i)
    toIFU(i).bits.vaddr        := s2_req_vaddr(i)
    toIFU(i).bits.tlbExcp.pageFault   := s2_except_pf(i)
    toIFU(i).bits.tlbExcp.accessFault := s2_except_tlb_af(i) || missSlot(i).m_corrupt || s2_except_pmp_af(i)
    toIFU(i).bits.tlbExcp.mmio        := s2_mmio

    // refill came back corrupt from L2: report it, but do not re-notify the
    // bus error unit (L2 already did)
    when(RegNext(s2_fire && missSlot(i).m_corrupt)){
      io.errors(i).valid         := true.B
      io.errors(i).report_to_beu := false.B // l2 should have report that to bus error unit, no need to do it again
      io.errors(i).paddr         := RegNext(s2_req_paddr(i))
      io.errors(i).source.tag    := false.B
      io.errors(i).source.data   := false.B
      io.errors(i).source.l2     := true.B
    }
  }

  io.perfInfo.only_0_hit      := only_0_hit_latch
  io.perfInfo.only_0_miss     := only_0_miss_latch
  io.perfInfo.hit_0_hit_1     := hit_0_hit_1_latch
  io.perfInfo.hit_0_miss_1    := hit_0_miss_1_latch
  io.perfInfo.miss_0_hit_1    := miss_0_hit_1_latch
  io.perfInfo.miss_0_miss_1   := miss_0_miss_1_latch
  io.perfInfo.hit_0_except_1  := hit_0_except_1_latch
  io.perfInfo.miss_0_except_1 := miss_0_except_1_latch
  io.perfInfo.except_0        := except_0_latch
  io.perfInfo.bank_hit(0)     := only_0_miss_latch || hit_0_hit_1_latch || hit_0_miss_1_latch || hit_0_except_1_latch
  io.perfInfo.bank_hit(1)     := miss_0_hit_1_latch || hit_0_hit_1_latch
  io.perfInfo.hit             := hit_0_hit_1_latch || only_0_hit_latch || hit_0_except_1_latch || except_0_latch

  /** <PERF> fetch bubble generated by icache miss*/

  XSPerfAccumulate("icache_bubble_s2_miss", s2_valid && !s2_fetch_finish )

  val tlb_miss_vec = VecInit((0 until PortNumber).map(i => toITLB(i).valid && s0_can_go && fromITLB(i).bits.miss))
  val tlb_has_miss = tlb_miss_vec.reduce(_ || _)
  XSPerfAccumulate("icache_bubble_s0_tlb_miss", s0_valid && tlb_has_miss )
}