/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.frontend

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import xiangshan.cache._
import xiangshan.cache.mmu._
import chisel3.experimental.verification
import utils._

trait HasInstrMMIOConst extends HasXSParameter with HasIFUConst {
  def mmioBusWidth  = 64
  def mmioBusBytes  = mmioBusWidth / 8
  def mmioBeats     = FetchWidth * 4 * 8 / mmioBusWidth
  def mmioMask      = VecInit(List.fill(PredictWidth)(true.B)).asUInt
  def mmioBusAligned(pc: UInt): UInt = align(pc, mmioBusBytes)
}

trait HasIFUConst extends HasXSParameter {
  def align(pc: UInt, bytes: Int): UInt = Cat(pc(VAddrBits - 1, log2Ceil(bytes)), 0.U(log2Ceil(bytes).W))
  // def groupAligned(pc: UInt)  = align(pc, groupBytes)
  // def packetAligned(pc: UInt) = align(pc, packetBytes)
}

class IfuToFtqIO(implicit p: Parameters) extends XSBundle {
  val pdWb = Valid(new PredecodeWritebackBundle)
}

class FtqInterface(implicit p: Parameters) extends XSBundle {
  val fromFtq = Flipped(new FtqToIfuIO)
  val toFtq   = new IfuToFtqIO
}

class ICacheInterface(implicit p: Parameters) extends XSBundle {
  val toIMeta       = Decoupled(new ICacheReadBundle)
  val toIData       = Decoupled(new ICacheReadBundle)
  val toMissQueue   = Vec(2, Decoupled(new ICacheMissReq))
  val fromIMeta     = Input(new ICacheMetaRespBundle)
  val fromIData     = Input(new ICacheDataRespBundle)
  val fromMissQueue = Vec(2, Flipped(Decoupled(new ICacheMissResp)))
}

class NewIFUIO(implicit p: Parameters) extends XSBundle {
  val ftqInter    = new FtqInterface
  val icacheInter = new ICacheInterface
  val toIbuffer   = Decoupled(new FetchToIBuffer)
  val iTLBInter   = Vec(2, new BlockTlbRequestIO)
}

// record the situation in which fallThruAddr falls into
// the middle of an RVI inst
class LastHalfInfo(implicit p: Parameters) extends XSBundle {
  val valid    = Bool()
  val middlePC = UInt(VAddrBits.W)
  def matchThisBlock(startAddr: UInt) = valid && middlePC === startAddr
}

class IfuToPreDecode(implicit p: Parameters) extends XSBundle {
  val data          = if (HasCExtension) Vec(PredictWidth + 1, UInt(16.W)) else Vec(PredictWidth, UInt(32.W))
  val startAddr     = UInt(VAddrBits.W)
  val fallThruAddr  = UInt(VAddrBits.W)
  val fallThruError = Bool()
  val isDoubleLine  = Bool()
  val ftqOffset     = Valid(UInt(log2Ceil(PredictWidth).W))
  val target        = UInt(VAddrBits.W)
  val pageFault     = Vec(2, Bool())
  val accessFault   = Vec(2, Bool())
  val instValid     = Bool()
  val lastHalfMatch = Bool()
  val oversize      = Bool()
}

class NewIFU(implicit p: Parameters) extends XSModule with HasICacheParameters
{
  println(s"icache ways: ${nWays} sets:${nSets}")
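  // Pipeline overview (one FTQ request may span two cachelines):
  //   f0 (Fetch Stage 1): send the request to the ICache meta/data arrays
  //   f1 (Fetch Stage 2): ITLB translation, tag comparison / hit check, victim way selection
  //   f2 (Fetch Stage 3): wait for miss-queue refill(s) if needed, then cut the cacheline(s)
  //   f3 (Fetch Stage 4): pre-decode, send to the Ibuffer and write predecode info back to the FTQ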
  val io = IO(new NewIFUIO)
  val (toFtq, fromFtq)             = (io.ftqInter.toFtq, io.ftqInter.fromFtq)
  val (toMeta, toData, meta_resp, data_resp) = (io.icacheInter.toIMeta, io.icacheInter.toIData, io.icacheInter.fromIMeta, io.icacheInter.fromIData)
  val (toMissQueue, fromMissQueue) = (io.icacheInter.toMissQueue, io.icacheInter.fromMissQueue)
  val (toITLB, fromITLB)           = (VecInit(io.iTLBInter.map(_.req)), VecInit(io.iTLBInter.map(_.resp)))

  def isCrossLineReq(start: UInt, end: UInt): Bool = start(blockOffBits) ^ end(blockOffBits)

  def isLastInCacheline(fallThruAddr: UInt): Bool = fallThruAddr(blockOffBits - 1, 1) === 0.U

  //---------------------------------------------
  //  Fetch Stage 1 :
  //  * Send req to ICache Meta/Data
  //  * Check whether a double-line (cross-cacheline) fetch is needed
  //---------------------------------------------

  val f0_valid      = fromFtq.req.valid
  val f0_ftq_req    = fromFtq.req.bits
  val f0_situation  = VecInit(Seq(isCrossLineReq(f0_ftq_req.startAddr, f0_ftq_req.fallThruAddr), isLastInCacheline(f0_ftq_req.fallThruAddr)))
  val f0_doubleLine = f0_situation(0) || f0_situation(1)
  val f0_vSetIdx    = VecInit(get_idx(f0_ftq_req.startAddr), get_idx(f0_ftq_req.fallThruAddr))
  val f0_fire       = fromFtq.req.fire()

  val f0_flush, f1_flush, f2_flush, f3_flush = WireInit(false.B)
  val from_bpu_f0_flush, from_bpu_f1_flush, from_bpu_f2_flush, from_bpu_f3_flush = WireInit(false.B)

  from_bpu_f0_flush := fromFtq.flushFromBpu.shouldFlushByStage2(f0_ftq_req.ftqIdx) ||
                       fromFtq.flushFromBpu.shouldFlushByStage3(f0_ftq_req.ftqIdx)

  val f3_redirect = WireInit(false.B)
  f3_flush := fromFtq.redirect.valid
  f2_flush := f3_flush || f3_redirect
  f1_flush := f2_flush || from_bpu_f1_flush
  f0_flush := f1_flush || from_bpu_f0_flush

  val f1_ready, f2_ready, f3_ready = WireInit(false.B)

  // fetch: send addr to Meta/TLB and Data simultaneously
  val fetch_req = List(toMeta, toData)
  for (i <- 0 until 2) {
    fetch_req(i).valid             := f0_fire
    fetch_req(i).bits.isDoubleLine := f0_doubleLine
    fetch_req(i).bits.vSetIdx      := f0_vSetIdx
  }

  fromFtq.req.ready := fetch_req(0).ready && fetch_req(1).ready && f1_ready && GTimer() > 500.U

  XSPerfAccumulate("ifu_bubble_ftq_not_valid",  !f0_valid)
  XSPerfAccumulate("ifu_bubble_pipe_stall",     f0_valid && fetch_req(0).ready && fetch_req(1).ready && !f1_ready)
  XSPerfAccumulate("ifu_bubble_sram_0_busy",    f0_valid && !fetch_req(0).ready)
  XSPerfAccumulate("ifu_bubble_sram_1_busy",    f0_valid && !fetch_req(1).ready)

  //---------------------------------------------
  //  Fetch Stage 2 :
  //  * Send req to ITLB and get the TLB response (paddr)
  //  * ICache response (meta and data)
  //  * Hit check (generate hit signal and hit vector)
  //  * Get victim way
  //---------------------------------------------

  // TODO: handle fetch exceptions

  val tlbRespAllValid = WireInit(false.B)

  val f1_valid      = RegInit(false.B)
  val f1_ftq_req    = RegEnable(next = f0_ftq_req,    enable = f0_fire)
  val f1_situation  = RegEnable(next = f0_situation,  enable = f0_fire)
  val f1_doubleLine = RegEnable(next = f0_doubleLine, enable = f0_fire)
  val f1_vSetIdx    = RegEnable(next = f0_vSetIdx,    enable = f0_fire)
  val f1_fire       = f1_valid && tlbRespAllValid && f2_ready

  f1_ready := f2_ready && tlbRespAllValid || !f1_valid

  from_bpu_f1_flush := fromFtq.flushFromBpu.shouldFlushByStage3(f1_ftq_req.ftqIdx)

  val preDecoder = Module(new PreDecode)
  val (preDecoderIn, preDecoderOut) = (preDecoder.io.in, preDecoder.io.out)
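  // The PreDecode inputs are driven from Fetch Stage 4 (the f3_* signals) below;
  // its outputs feed both the Ibuffer and the predecode write-back to the FTQ.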
  //flush generate and to Ftq
  val predecodeOutValid = WireInit(false.B)

  when(f1_flush)                    {f1_valid := false.B}
    .elsewhen(f0_fire && !f0_flush) {f1_valid := true.B}
    .elsewhen(f1_fire)              {f1_valid := false.B}

  toITLB(0).valid         := f1_valid
  toITLB(0).bits.vaddr    := align(f1_ftq_req.startAddr, blockBytes)
  toITLB(0).bits.debug.pc := align(f1_ftq_req.startAddr, blockBytes)

  toITLB(1).valid         := f1_valid && f1_doubleLine
  toITLB(1).bits.vaddr    := align(f1_ftq_req.fallThruAddr, blockBytes)
  toITLB(1).bits.debug.pc := align(f1_ftq_req.fallThruAddr, blockBytes)

  toITLB.map{ port =>
    port.bits.cmd                := TlbCmd.exec
    port.bits.roqIdx             := DontCare
    port.bits.debug.isFirstIssue := DontCare
  }

  fromITLB.map(_.ready := true.B)

  val (tlbRespValid, tlbRespPAddr) = (fromITLB.map(_.valid), VecInit(fromITLB.map(_.bits.paddr)))
  val (tlbRespMiss,  tlbRespMMIO)  = (fromITLB.map(port => port.bits.miss && port.valid), fromITLB.map(port => port.bits.mmio && port.valid))
  val (tlbExcpPF,    tlbExcpAF)    = (fromITLB.map(port => port.bits.excp.pf.instr && port.valid),
                                      fromITLB.map(port => (port.bits.excp.af.instr || port.bits.mmio) && port.valid)) // TODO: temporarily treat MMIO requests as access faults

  tlbRespAllValid := tlbRespValid(0) && (tlbRespValid(1) || !f1_doubleLine)

  val f1_pAddrs = tlbRespPAddr // TODO: temporary assignment
  val f1_pTags  = VecInit(f1_pAddrs.map(get_phy_tag(_)))
  val (f1_tags, f1_cacheline_valid, f1_datas) = (meta_resp.tags, meta_resp.valid, data_resp.datas)
  val bank0_hit_vec = VecInit(f1_tags(0).zipWithIndex.map{ case (way_tag, i) => f1_cacheline_valid(0)(i) && way_tag === f1_pTags(0) })
  val bank1_hit_vec = VecInit(f1_tags(1).zipWithIndex.map{ case (way_tag, i) => f1_cacheline_valid(1)(i) && way_tag === f1_pTags(1) })
  val (bank0_hit, bank1_hit) = (ParallelOR(bank0_hit_vec) && !tlbExcpPF(0) && !tlbExcpAF(0), ParallelOR(bank1_hit_vec) && !tlbExcpPF(1) && !tlbExcpAF(1))
  val f1_hit          = (bank0_hit && bank1_hit && f1_valid && f1_doubleLine) || (f1_valid && !f1_doubleLine && bank0_hit)
  val f1_bank_hit_vec = VecInit(Seq(bank0_hit_vec, bank1_hit_vec))
  val f1_bank_hit     = VecInit(Seq(bank0_hit, bank1_hit))

  val replacers       = Seq.fill(2)(ReplacementPolicy.fromString(Some("random"), nWays, nSets/2))
  val f1_victim_masks = VecInit(replacers.zipWithIndex.map{ case (replacer, i) => UIntToOH(replacer.way(f1_vSetIdx(i))) })

  val touch_sets = Seq.fill(2)(Wire(Vec(2, UInt(log2Ceil(nSets/2).W))))
  val touch_ways = Seq.fill(2)(Wire(Vec(2, Valid(UInt(log2Ceil(nWays).W)))))

  ((replacers zip touch_sets) zip touch_ways).map{ case ((r, s), w) => r.access(s, w) }

  val f1_hit_data = VecInit(f1_datas.zipWithIndex.map { case (bank, i) =>
    val bank_hit_data = Mux1H(f1_bank_hit_vec(i).asUInt, bank)
    bank_hit_data
  })
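  // Distribution counters: which way each fetched cacheline hits in, and which way is
  // chosen as the victim on a miss, for both possible cachelines of a request.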
  (0 until nWays).map{ w =>
    XSPerfAccumulate("line_0_hit_way_" + Integer.toString(w, 10),  f1_fire && f1_bank_hit(0) && OHToUInt(f1_bank_hit_vec(0)) === w.U)
  }

  (0 until nWays).map{ w =>
    XSPerfAccumulate("line_0_victim_way_" + Integer.toString(w, 10),  f1_fire && !f1_bank_hit(0) && OHToUInt(f1_victim_masks(0)) === w.U)
  }

  (0 until nWays).map{ w =>
    XSPerfAccumulate("line_1_hit_way_" + Integer.toString(w, 10),  f1_fire && f1_doubleLine && f1_bank_hit(1) && OHToUInt(f1_bank_hit_vec(1)) === w.U)
  }

  (0 until nWays).map{ w =>
    XSPerfAccumulate("line_1_victim_way_" + Integer.toString(w, 10),  f1_fire && f1_doubleLine && !f1_bank_hit(1) && OHToUInt(f1_victim_masks(1)) === w.U)
  }

  XSPerfAccumulate("ifu_bubble_f1_tlb_miss",  f1_valid && !tlbRespAllValid)

  //---------------------------------------------
  //  Fetch Stage 3 :
  //  * get data from the previous stage (a hit uses f1_hit_data, a miss uses the miss queue response)
  //  * if at least one needed cacheline misses, wait for the miss queue response (a wait_state machine) THIS IS TOO UGLY!!!
  //  * cut the cacheline(s) that will be sent to PreDecode
  //---------------------------------------------
  val f2_fetchFinish = Wire(Bool())

  val f2_valid      = RegInit(false.B)
  val f2_ftq_req    = RegEnable(next = f1_ftq_req,    enable = f1_fire)
  val f2_situation  = RegEnable(next = f1_situation,  enable = f1_fire)
  val f2_doubleLine = RegEnable(next = f1_doubleLine, enable = f1_fire)
  val f2_fire       = f2_valid && f2_fetchFinish && f3_ready

  f2_ready := (f3_ready && f2_fetchFinish) || !f2_valid

  when(f2_flush)                    {f2_valid := false.B}
    .elsewhen(f1_fire && !f1_flush) {f2_valid := true.B}
    .elsewhen(f2_fire)              {f2_valid := false.B}

  val f2_pAddrs   = RegEnable(next = f1_pAddrs,   enable = f1_fire)
  val f2_hit      = RegEnable(next = f1_hit,      enable = f1_fire)
  val f2_bank_hit = RegEnable(next = f1_bank_hit, enable = f1_fire)
  val f2_miss     = f2_valid && !f2_hit
  val (f2_vSetIdx, f2_pTags) = (RegEnable(next = f1_vSetIdx, enable = f1_fire), RegEnable(next = f1_pTags, enable = f1_fire))
  val f2_waymask  = RegEnable(next = f1_victim_masks, enable = f1_fire)
  // exception information
  val f2_except_pf  = RegEnable(next = VecInit(tlbExcpPF), enable = f1_fire)
  val f2_except_af  = RegEnable(next = VecInit(tlbExcpAF), enable = f1_fire)
  val f2_except     = VecInit((0 until 2).map{ i => f2_except_pf(i) || f2_except_af(i) })
  val f2_has_except = f2_valid && (f2_except_af.reduce(_||_) || f2_except_pf.reduce(_||_))

  // instruction refill wait states
  val wait_idle :: wait_queue_ready :: wait_send_req :: wait_two_resp :: wait_0_resp :: wait_1_resp :: wait_one_resp :: wait_finish :: Nil = Enum(8)
  val wait_state = RegInit(wait_idle)

  fromMissQueue.map{ port => port.ready := true.B }

  val (miss0_resp, miss1_resp) = (fromMissQueue(0).fire(), fromMissQueue(1).fire())
  val (bank0_fix,  bank1_fix)  = (miss0_resp && !f2_bank_hit(0), miss1_resp && f2_doubleLine && !f2_bank_hit(1))

  val only_0_miss = f2_valid && !f2_hit && !f2_doubleLine && !f2_has_except
  val only_0_hit  = f2_valid && f2_hit && !f2_doubleLine
  val hit_0_hit_1 = f2_valid && f2_hit && f2_doubleLine
  val (hit_0_miss_1, miss_0_hit_1, miss_0_miss_1) = ((f2_valid && !f2_bank_hit(1) && f2_bank_hit(0) && f2_doubleLine && !f2_has_except),
                                                     (f2_valid && !f2_bank_hit(0) && f2_bank_hit(1) && f2_doubleLine && !f2_has_except),
                                                     (f2_valid && !f2_bank_hit(0) && !f2_bank_hit(1) && f2_doubleLine && !f2_has_except))

  val hit_0_except_1  = f2_valid && f2_doubleLine && !f2_except(0) && f2_except(1) && f2_bank_hit(0)
  val miss_0_except_1 = f2_valid && f2_doubleLine && !f2_except(0) && f2_except(1) && !f2_bank_hit(0)
  //val fetch0_except_1 = hit_0_except_1 || miss_0_except_1
  val except_0        = f2_valid && f2_except(0)

  val f2_mq_datas = Reg(Vec(2, UInt(blockBits.W)))

  when(fromMissQueue(0).fire) {f2_mq_datas(0) := fromMissQueue(0).bits.data}
  when(fromMissQueue(1).fire) {f2_mq_datas(1) := fromMissQueue(1).bits.data}
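  // Refill wait-state machine (one instance serves both possible misses):
  //   wait_idle        -> wait_queue_ready  when at least one needed line misses and the miss queue can accept the request(s)
  //   wait_queue_ready -> wait_send_req     (the requests are actually issued while in wait_queue_ready, see toMissQueue below)
  //   wait_send_req    -> wait_one_resp / wait_two_resp  depending on how many lines missed
  //   wait_*_resp      -> wait_finish       once every outstanding refill response has arrived
  //   wait_finish      -> wait_idle         when the fetch leaves this stage (f2_fire); f2_flush also resets it to wait_idle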
  switch(wait_state){
    is(wait_idle){
      when(miss_0_except_1){
        wait_state := Mux(toMissQueue(0).ready, wait_queue_ready, wait_idle)
      }.elsewhen(only_0_miss || miss_0_hit_1){
        wait_state := Mux(toMissQueue(0).ready, wait_queue_ready, wait_idle)
      }.elsewhen(hit_0_miss_1){
        wait_state := Mux(toMissQueue(1).ready, wait_queue_ready, wait_idle)
      }.elsewhen(miss_0_miss_1){
        wait_state := Mux(toMissQueue(0).ready && toMissQueue(1).ready, wait_queue_ready, wait_idle)
      }
    }

    // TODO: naive logic for waiting for the ICache response
    is(wait_queue_ready){
      wait_state := wait_send_req
    }

    is(wait_send_req) {
      when(miss_0_except_1 || only_0_miss || hit_0_miss_1 || miss_0_hit_1){
        wait_state := wait_one_resp
      }.elsewhen(miss_0_miss_1){
        wait_state := wait_two_resp
      }
    }

    is(wait_one_resp) {
      when((miss_0_except_1 || only_0_miss || miss_0_hit_1) && fromMissQueue(0).fire()){
        wait_state := wait_finish
      }.elsewhen(hit_0_miss_1 && fromMissQueue(1).fire()){
        wait_state := wait_finish
      }
    }

    is(wait_two_resp) {
      when(fromMissQueue(0).fire() && fromMissQueue(1).fire()){
        wait_state := wait_finish
      }.elsewhen(!fromMissQueue(0).fire() && fromMissQueue(1).fire()){
        wait_state := wait_0_resp
      }.elsewhen(fromMissQueue(0).fire() && !fromMissQueue(1).fire()){
        wait_state := wait_1_resp
      }
    }

    is(wait_0_resp) {
      when(fromMissQueue(0).fire()){
        wait_state := wait_finish
      }
    }

    is(wait_1_resp) {
      when(fromMissQueue(1).fire()){
        wait_state := wait_finish
      }
    }

    is(wait_finish) {
      when(f2_fire) {wait_state := wait_idle}
    }
  }

  when(f2_flush) { wait_state := wait_idle }

  (0 until 2).map { i =>
    if(i == 1) toMissQueue(i).valid := (hit_0_miss_1 || miss_0_miss_1) && wait_state === wait_queue_ready
    else       toMissQueue(i).valid := (only_0_miss || miss_0_hit_1 || miss_0_miss_1) && wait_state === wait_queue_ready
    toMissQueue(i).bits.addr     := f2_pAddrs(i)
    toMissQueue(i).bits.vSetIdx  := f2_vSetIdx(i)
    toMissQueue(i).bits.waymask  := f2_waymask(i)
    toMissQueue(i).bits.clientID := 0.U
  }

  val miss_all_fix = (wait_state === wait_finish)

  f2_fetchFinish := ((f2_valid && f2_hit) || miss_all_fix || hit_0_except_1 || except_0)

  XSPerfAccumulate("ifu_bubble_f2_miss",  f2_valid && !f2_fetchFinish)

  (touch_ways zip touch_sets).zipWithIndex.map{ case ((t_w, t_s), i) =>
    t_s(0)       := f1_vSetIdx(i)
    t_w(0).valid := f1_bank_hit(i)
    t_w(0).bits  := OHToUInt(f1_bank_hit_vec(i))

    t_s(1)       := f2_vSetIdx(i)
    t_w(1).valid := f2_valid && !f2_bank_hit(i)
    t_w(1).bits  := OHToUInt(f2_waymask(i))
  }

  val sec_miss_reg       = RegInit(0.U.asTypeOf(Vec(4, Bool())))
  val reservedRefillData = Reg(Vec(2, UInt(blockBits.W)))
  val f2_hit_datas       = RegEnable(next = f1_hit_data, enable = f1_fire)
  val f2_datas           = Wire(Vec(2, UInt(blockBits.W)))

  f2_datas.zipWithIndex.map{ case (bank, i) =>
    if(i == 0) bank := Mux(f2_bank_hit(i), f2_hit_datas(i), Mux(sec_miss_reg(2), reservedRefillData(1), Mux(sec_miss_reg(0), reservedRefillData(0), f2_mq_datas(i))))
    else       bank := Mux(f2_bank_hit(i), f2_hit_datas(i), Mux(sec_miss_reg(3), reservedRefillData(1), Mux(sec_miss_reg(1), reservedRefillData(0), f2_mq_datas(i))))
  }

  val f2_jump_valids      = Fill(PredictWidth, !preDecoderOut.cfiOffset.valid) | Fill(PredictWidth, 1.U(1.W)) >> (~preDecoderOut.cfiOffset.bits)
  val f2_predecode_valids = VecInit(preDecoderOut.pd.map(instr => instr.valid)).asUInt & f2_jump_valids
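  // cut() extracts the instruction stream of this fetch block from the concatenated
  // cacheline data: with the C extension it returns PredictWidth + 1 16-bit parcels
  // (the extra parcel lets a 32-bit instruction that begins in the last slot still get
  // its upper half), otherwise PredictWidth aligned 32-bit words, starting at
  // startAddr's offset within the block.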
  def cut(cacheline: UInt, start: UInt): Vec[UInt] = {
    if(HasCExtension){
      val result   = Wire(Vec(PredictWidth + 1, UInt(16.W)))
      val dataVec  = cacheline.asTypeOf(Vec(blockBytes * 2 / 2, UInt(16.W)))
      val startPtr = Cat(0.U(1.W), start(blockOffBits - 1, 1))
      (0 until PredictWidth + 1).foreach( i =>
        result(i) := dataVec(startPtr + i.U)
      )
      result
    } else {
      val result   = Wire(Vec(PredictWidth, UInt(32.W)))
      val dataVec  = cacheline.asTypeOf(Vec(blockBytes * 2 / 4, UInt(32.W)))
      val startPtr = Cat(0.U(1.W), start(blockOffBits - 1, 2))
      (0 until PredictWidth).foreach( i =>
        result(i) := dataVec(startPtr + i.U)
      )
      result
    }
  }

  val f2_cut_data = cut(Cat(f2_datas.map(cacheline => cacheline.asUInt).reverse).asUInt, f2_ftq_req.startAddr)

  // Deal with a secondary miss in f1: the request currently in f1 wants a cacheline
  // that f2 is refilling right now, so the refill data must be reserved for it.
  val f2_0_f1_0 = ((f2_valid && !f2_bank_hit(0)) && f1_valid && (get_block_addr(f2_ftq_req.startAddr) === get_block_addr(f1_ftq_req.startAddr)))
  val f2_0_f1_1 = ((f2_valid && !f2_bank_hit(0)) && f1_valid && f1_doubleLine && (get_block_addr(f2_ftq_req.startAddr) === get_block_addr(f1_ftq_req.startAddr + blockBytes.U)))
  val f2_1_f1_0 = ((f2_valid && !f2_bank_hit(1) && f2_doubleLine) && f1_valid && (get_block_addr(f2_ftq_req.startAddr + blockBytes.U) === get_block_addr(f1_ftq_req.startAddr)))
  val f2_1_f1_1 = ((f2_valid && !f2_bank_hit(1) && f2_doubleLine) && f1_valid && f1_doubleLine && (get_block_addr(f2_ftq_req.startAddr + blockBytes.U) === get_block_addr(f1_ftq_req.startAddr + blockBytes.U)))

  val isSameLine   = f2_0_f1_0 || f2_0_f1_1 || f2_1_f1_0 || f2_1_f1_1
  val sec_miss_sit = VecInit(Seq(f2_0_f1_0, f2_0_f1_1, f2_1_f1_0, f2_1_f1_1))
  val hasSecMiss   = RegInit(false.B)

  when(f2_flush){
    sec_miss_reg.map(sig => sig := false.B)
    hasSecMiss := false.B
  }.elsewhen(isSameLine && !f1_flush && f2_fire){
    sec_miss_reg.zipWithIndex.map{ case (sig, i) => sig := sec_miss_sit(i) }
    hasSecMiss := true.B
  }.elsewhen((!isSameLine || f1_flush) && hasSecMiss && f2_fire){
    sec_miss_reg.map(sig => sig := false.B)
    hasSecMiss := false.B
  }

  when((f2_0_f1_0 || f2_0_f1_1) && f2_fire){
    reservedRefillData(0) := f2_mq_datas(0)
  }

  when((f2_1_f1_0 || f2_1_f1_1) && f2_fire){
    reservedRefillData(1) := f2_mq_datas(1)
  }
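  // sec_miss_reg and reservedRefillData are consumed one fetch later, in the f2_datas
  // selection above, so the follow-up request does not have to miss on the same line again.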
  //---------------------------------------------
  //  Fetch Stage 4 :
  //  * pre-decode the cut data from the previous stage
  //  * check whether the prediction is right (branch target and type, jump direction and type, jal target)
  //  * send instructions to the Ibuffer and write the predecode information back to the Ftq
  //---------------------------------------------
  val f3_valid      = RegInit(false.B)
  val f3_ftq_req    = RegEnable(next = f2_ftq_req,    enable = f2_fire)
  val f3_situation  = RegEnable(next = f2_situation,  enable = f2_fire)
  val f3_doubleLine = RegEnable(next = f2_doubleLine, enable = f2_fire)
  val f3_fire       = io.toIbuffer.fire()

  when(f3_flush)                    {f3_valid := false.B}
    .elsewhen(f2_fire && !f2_flush) {f3_valid := true.B}
    .elsewhen(io.toIbuffer.fire())  {f3_valid := false.B}

  f3_ready := io.toIbuffer.ready || !f2_valid

  val f3_cut_data  = RegEnable(next = f2_cut_data,  enable = f2_fire)
  val f3_except_pf = RegEnable(next = f2_except_pf, enable = f2_fire)
  val f3_except_af = RegEnable(next = f2_except_af, enable = f2_fire)
  val f3_hit       = RegEnable(next = f2_hit,       enable = f2_fire)

  val f3_lastHalf      = RegInit(0.U.asTypeOf(new LastHalfInfo))
  val f3_lastHalfMatch = f3_lastHalf.matchThisBlock(f3_ftq_req.startAddr)
  val f3_except        = VecInit((0 until 2).map{ i => f3_except_pf(i) || f3_except_af(i) })
  val f3_has_except    = f3_valid && (f3_except_af.reduce(_||_) || f3_except_pf.reduce(_||_))

  // performance counters
  val f3_only_0_hit    = RegEnable(next = only_0_hit,    enable = f2_fire)
  val f3_only_0_miss   = RegEnable(next = only_0_miss,   enable = f2_fire)
  val f3_hit_0_hit_1   = RegEnable(next = hit_0_hit_1,   enable = f2_fire)
  val f3_hit_0_miss_1  = RegEnable(next = hit_0_miss_1,  enable = f2_fire)
  val f3_miss_0_hit_1  = RegEnable(next = miss_0_hit_1,  enable = f2_fire)
  val f3_miss_0_miss_1 = RegEnable(next = miss_0_miss_1, enable = f2_fire)

  val f3_bank_hit = RegEnable(next = f2_bank_hit, enable = f2_fire)
  val f3_req_0    = io.toIbuffer.fire()
  val f3_req_1    = io.toIbuffer.fire() && f3_doubleLine
  val f3_hit_0    = io.toIbuffer.fire() && f3_bank_hit(0)
  val f3_hit_1    = io.toIbuffer.fire() && f3_doubleLine && f3_bank_hit(1)

  preDecoderIn.instValid     := f3_valid && !f3_has_except
  preDecoderIn.data          := f3_cut_data
  preDecoderIn.startAddr     := f3_ftq_req.startAddr
  preDecoderIn.fallThruAddr  := f3_ftq_req.fallThruAddr
  preDecoderIn.fallThruError := f3_ftq_req.fallThruError
  preDecoderIn.isDoubleLine  := f3_doubleLine
  preDecoderIn.ftqOffset     := f3_ftq_req.ftqOffset
  preDecoderIn.target        := f3_ftq_req.target
  preDecoderIn.oversize      := f3_ftq_req.oversize
  preDecoderIn.lastHalfMatch := f3_lastHalfMatch
  preDecoderIn.pageFault     := f3_except_pf
  preDecoderIn.accessFault   := f3_except_af

  // TODO: What if next packet does not match?
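  // When the last 2 bytes of this block are the first half of an RVI instruction,
  // remember its middle PC so that the next block (which starts exactly there) can
  // treat its first parcel as the second half of that instruction.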
  when (f3_flush) {
    f3_lastHalf.valid := false.B
  }.elsewhen (io.toIbuffer.fire()) {
    f3_lastHalf.valid    := preDecoderOut.hasLastHalf
    f3_lastHalf.middlePC := preDecoderOut.realEndPC
  }

  val f3_predecode_range = VecInit(preDecoderOut.pd.map(inst => inst.valid)).asUInt

  io.toIbuffer.valid            := f3_valid
  io.toIbuffer.bits.instrs      := preDecoderOut.instrs
  io.toIbuffer.bits.valid       := f3_predecode_range & preDecoderOut.instrRange.asUInt
  io.toIbuffer.bits.pd          := preDecoderOut.pd
  io.toIbuffer.bits.ftqPtr      := f3_ftq_req.ftqIdx
  io.toIbuffer.bits.pc          := preDecoderOut.pc
  io.toIbuffer.bits.ftqOffset.zipWithIndex.map{ case (a, i) => a.bits := i.U; a.valid := preDecoderOut.takens(i) }
  io.toIbuffer.bits.foldpc      := preDecoderOut.pc.map(i => XORFold(i(VAddrBits - 1, 1), MemPredPCWidth))
  io.toIbuffer.bits.ipf         := preDecoderOut.pageFault
  io.toIbuffer.bits.acf         := preDecoderOut.accessFault
  io.toIbuffer.bits.crossPageIPFFix := preDecoderOut.crossPageIPF

  // Write back to Ftq
  val finishFetchMaskReg = RegNext(f3_valid && !(f2_fire && !f2_flush))

  toFtq.pdWb.valid           := !finishFetchMaskReg && f3_valid
  toFtq.pdWb.bits.pc         := preDecoderOut.pc
  toFtq.pdWb.bits.pd         := preDecoderOut.pd
  toFtq.pdWb.bits.pd.zipWithIndex.map{ case (instr, i) => instr.valid := f3_predecode_range(i) }
  toFtq.pdWb.bits.ftqIdx     := f3_ftq_req.ftqIdx
  toFtq.pdWb.bits.ftqOffset  := f3_ftq_req.ftqOffset.bits
  toFtq.pdWb.bits.misOffset  := preDecoderOut.misOffset
  toFtq.pdWb.bits.cfiOffset  := preDecoderOut.cfiOffset
  toFtq.pdWb.bits.target     := preDecoderOut.target
  toFtq.pdWb.bits.jalTarget  := preDecoderOut.jalTarget
  toFtq.pdWb.bits.instrRange := preDecoderOut.instrRange

  val predecodeFlush    = preDecoderOut.misOffset.valid && f3_valid
  val predecodeFlushReg = RegNext(predecodeFlush && !(f2_fire && !f2_flush))

  f3_redirect := !predecodeFlushReg && predecodeFlush

  XSPerfAccumulate("ifu_req",                 io.toIbuffer.fire())
  XSPerfAccumulate("ifu_miss",                io.toIbuffer.fire() && !f3_hit)
  XSPerfAccumulate("ifu_req_cacheline_0",     f3_req_0)
  XSPerfAccumulate("ifu_req_cacheline_1",     f3_req_1)
  XSPerfAccumulate("ifu_req_cacheline_0_hit", f3_hit_0)
  XSPerfAccumulate("ifu_req_cacheline_1_hit", f3_hit_1)
  XSPerfAccumulate("frontendFlush",           f3_redirect)
  XSPerfAccumulate("only_0_hit",              f3_only_0_hit    && io.toIbuffer.fire())
  XSPerfAccumulate("only_0_miss",             f3_only_0_miss   && io.toIbuffer.fire())
  XSPerfAccumulate("hit_0_hit_1",             f3_hit_0_hit_1   && io.toIbuffer.fire())
  XSPerfAccumulate("hit_0_miss_1",            f3_hit_0_miss_1  && io.toIbuffer.fire())
  XSPerfAccumulate("miss_0_hit_1",            f3_miss_0_hit_1  && io.toIbuffer.fire())
  XSPerfAccumulate("miss_0_miss_1",           f3_miss_0_miss_1 && io.toIbuffer.fire())
  XSPerfAccumulate("cross_line_block",                io.toIbuffer.fire() && f3_situation(0))
  XSPerfAccumulate("fall_through_is_cacheline_end",   io.toIbuffer.fire() && f3_situation(1))
}