/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.frontend

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import xiangshan.cache._
import xiangshan.cache.mmu._
import chisel3.experimental.verification
import utils._
import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle}

trait HasInstrMMIOConst extends HasXSParameter with HasIFUConst {
  def mmioBusWidth  = 64
  def mmioBusBytes  = mmioBusWidth / 8
  def mmioBeats     = FetchWidth * 4 * 8 / mmioBusWidth
  def mmioMask      = VecInit(List.fill(PredictWidth)(true.B)).asUInt
  def mmioBusAligned(pc: UInt): UInt = align(pc, mmioBusBytes)
}

trait HasIFUConst extends HasXSParameter {
  // align pc down to a multiple of `bytes` by zeroing the low-order bits
  def align(pc: UInt, bytes: Int): UInt = Cat(pc(VAddrBits-1, log2Ceil(bytes)), 0.U(log2Ceil(bytes).W))
  // def groupAligned(pc: UInt)  = align(pc, groupBytes)
  // def packetAligned(pc: UInt) = align(pc, packetBytes)
}

class IfuToFtqIO(implicit p: Parameters) extends XSBundle {
  val pdWb = Valid(new PredecodeWritebackBundle)
}

class FtqInterface(implicit p: Parameters) extends XSBundle {
  val fromFtq = Flipped(new FtqToIfuIO)
  val toFtq   = new IfuToFtqIO
}

class ICacheInterface(implicit p: Parameters) extends XSBundle {
  val toIMeta       = Decoupled(new ICacheReadBundle)
  val toIData       = Decoupled(new ICacheReadBundle)
  val toMissQueue   = Vec(2, Decoupled(new ICacheMissReq))
  val fromIMeta     = Input(new ICacheMetaRespBundle)
  val fromIData     = Input(new ICacheDataRespBundle)
  val fromMissQueue = Vec(2, Flipped(Decoupled(new ICacheMissResp)))
}

class NewIFUIO(implicit p: Parameters) extends XSBundle {
  val ftqInter    = new FtqInterface
  val icacheInter = new ICacheInterface
  val toIbuffer   = Decoupled(new FetchToIBuffer)
  val iTLBInter   = Vec(2, new BlockTlbRequestIO)
  val pmp         = Vec(2, new Bundle {
    val req  = Valid(new PMPReqBundle())
    val resp = Input(new PMPRespBundle())
  })
}

// records the case in which fallThruAddr falls into
// the middle of an RVI instruction
class LastHalfInfo(implicit p: Parameters) extends XSBundle {
  val valid    = Bool()
  val middlePC = UInt(VAddrBits.W)
  def matchThisBlock(startAddr: UInt) = valid && middlePC === startAddr
}
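// Worked example (illustrative, assuming 64-byte cache blocks): a 4-byte RVI
// instruction starting at in-block byte offset 0x3e has its lower half at the
// end of block A and its upper half at the start of block B = A + 0x40. The
// packet that fetched block A records { valid, middlePC = B }; when the next
// packet's startAddr equals middlePC, matchThisBlock fires, telling PreDecode
// that the first half-word of this packet completes a previous instruction.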
class IfuToPreDecode(implicit p: Parameters) extends XSBundle {
  val data          = if(HasCExtension) Vec(PredictWidth + 1, UInt(16.W)) else Vec(PredictWidth, UInt(32.W))
  val startAddr     = UInt(VAddrBits.W)
  val fallThruAddr  = UInt(VAddrBits.W)
  val fallThruError = Bool()
  val isDoubleLine  = Bool()
  val ftqOffset     = Valid(UInt(log2Ceil(PredictWidth).W))
  val target        = UInt(VAddrBits.W)
  val pageFault     = Vec(2, Bool())
  val accessFault   = Vec(2, Bool())
  val instValid     = Bool()
  val lastHalfMatch = Bool()
  val oversize      = Bool()
}

class NewIFU(implicit p: Parameters) extends XSModule with HasICacheParameters
{
  println(s"icache ways: ${nWays} sets: ${nSets}")
  val io = IO(new NewIFUIO)
  val (toFtq, fromFtq) = (io.ftqInter.toFtq, io.ftqInter.fromFtq)
  val (toMeta, toData, meta_resp, data_resp) = (io.icacheInter.toIMeta, io.icacheInter.toIData, io.icacheInter.fromIMeta, io.icacheInter.fromIData)
  val (toMissQueue, fromMissQueue) = (io.icacheInter.toMissQueue, io.icacheInter.fromMissQueue)
  val (toITLB, fromITLB) = (VecInit(io.iTLBInter.map(_.req)), VecInit(io.iTLBInter.map(_.resp)))
  val fromPMP = io.pmp.map(_.resp)

  // the request crosses a cacheline boundary if start and end lie in different lines
  def isCrossLineReq(start: UInt, end: UInt): Bool = start(blockOffBits) ^ end(blockOffBits)

  def isLastInCacheline(fallThruAddr: UInt): Bool = fallThruAddr(blockOffBits - 1, 1) === 0.U


  //---------------------------------------------
  //  Fetch Stage 1 (F0):
  //  * Send req to ICache Meta/Data
  //  * Check whether a double-line fetch is needed
  //---------------------------------------------

  val f0_valid      = fromFtq.req.valid
  val f0_ftq_req    = fromFtq.req.bits
  val f0_situation  = VecInit(Seq(isCrossLineReq(f0_ftq_req.startAddr, f0_ftq_req.fallThruAddr), isLastInCacheline(f0_ftq_req.fallThruAddr)))
  val f0_doubleLine = f0_situation(0) || f0_situation(1)
  val f0_vSetIdx    = VecInit(get_idx(f0_ftq_req.startAddr), get_idx(f0_ftq_req.fallThruAddr))
  val f0_fire       = fromFtq.req.fire()

  val f0_flush, f1_flush, f2_flush, f3_flush = WireInit(false.B)
  val from_bpu_f0_flush, from_bpu_f1_flush, from_bpu_f2_flush, from_bpu_f3_flush = WireInit(false.B)

  from_bpu_f0_flush := fromFtq.flushFromBpu.shouldFlushByStage2(f0_ftq_req.ftqIdx) ||
                       fromFtq.flushFromBpu.shouldFlushByStage3(f0_ftq_req.ftqIdx)

  val f3_redirect = WireInit(false.B)
  f3_flush := fromFtq.redirect.valid
  f2_flush := f3_flush || f3_redirect
  f1_flush := f2_flush || from_bpu_f1_flush
  f0_flush := f1_flush || from_bpu_f0_flush

  val f1_ready, f2_ready, f3_ready = WireInit(false.B)

  // fetch: send addr to Meta/TLB and Data simultaneously
  val fetch_req = List(toMeta, toData)
  for(i <- 0 until 2) {
    fetch_req(i).valid            := f0_fire
    fetch_req(i).bits.isDoubleLine := f0_doubleLine
    fetch_req(i).bits.vSetIdx      := f0_vSetIdx
  }

  // hold off fetch for the first 500 cycles after reset
  fromFtq.req.ready := fetch_req(0).ready && fetch_req(1).ready && f1_ready && GTimer() > 500.U

  XSPerfAccumulate("ifu_bubble_ftq_not_valid",  !f0_valid )
  XSPerfAccumulate("ifu_bubble_pipe_stall",     f0_valid && fetch_req(0).ready && fetch_req(1).ready && !f1_ready )
  XSPerfAccumulate("ifu_bubble_sram_0_busy",    f0_valid && !fetch_req(0).ready )
  XSPerfAccumulate("ifu_bubble_sram_1_busy",    f0_valid && !fetch_req(1).ready )
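  // A minimal sketch of the handshake pattern every stage below follows
  // (illustrative only, not additional hardware):
  //   fN_fire  := fN_valid && <stage-N work done> && f(N+1)_ready
  //   fN_ready := f(N+1)_ready && <stage-N work done> || !fN_valid
  //   when(fN_flush)                          { fN_valid := false.B }
  //   .elsewhen(f(N-1)_fire && !f(N-1)_flush) { fN_valid := true.B  }
  //   .elsewhen(fN_fire)                      { fN_valid := false.B }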

  //---------------------------------------------
  //  Fetch Stage 2 (F1):
  //  * Send req to ITLB and get the TLB response (paddr)
  //  * ICache response (meta and data)
  //  * Hit check (generate hit signal and hit vector)
  //  * Get victim way
  //---------------------------------------------

  //TODO: handle fetch exceptions

  val tlbRespAllValid = WireInit(false.B)

  val f1_valid      = RegInit(false.B)
  val f1_ftq_req    = RegEnable(next = f0_ftq_req,    enable = f0_fire)
  val f1_situation  = RegEnable(next = f0_situation,  enable = f0_fire)
  val f1_doubleLine = RegEnable(next = f0_doubleLine, enable = f0_fire)
  val f1_vSetIdx    = RegEnable(next = f0_vSetIdx,    enable = f0_fire)
  val f1_fire       = f1_valid && tlbRespAllValid && f2_ready

  f1_ready := f2_ready && tlbRespAllValid || !f1_valid

  from_bpu_f1_flush := fromFtq.flushFromBpu.shouldFlushByStage3(f1_ftq_req.ftqIdx)

  val preDecoder = Module(new PreDecode)
  val (preDecoderIn, preDecoderOut) = (preDecoder.io.in, preDecoder.io.out)

  // flush generation, written back to the FTQ
  val predecodeOutValid = WireInit(false.B)

  when(f1_flush)                  {f1_valid := false.B}
  .elsewhen(f0_fire && !f0_flush) {f1_valid := true.B}
  .elsewhen(f1_fire)              {f1_valid := false.B}

  toITLB(0).valid         := f1_valid
  toITLB(0).bits.size     := 3.U // TODO: fix the size
  toITLB(0).bits.vaddr    := align(f1_ftq_req.startAddr, blockBytes)
  toITLB(0).bits.debug.pc := align(f1_ftq_req.startAddr, blockBytes)

  toITLB(1).valid         := f1_valid && f1_doubleLine
  toITLB(1).bits.size     := 3.U // TODO: fix the size
  toITLB(1).bits.vaddr    := align(f1_ftq_req.fallThruAddr, blockBytes)
  toITLB(1).bits.debug.pc := align(f1_ftq_req.fallThruAddr, blockBytes)

  toITLB.map{port =>
    port.bits.cmd                := TlbCmd.exec
    port.bits.robIdx             := DontCare
    port.bits.debug.isFirstIssue := DontCare
  }

  fromITLB.map(_.ready := true.B)

  val (tlbRespValid, tlbRespPAddr) = (fromITLB.map(_.valid), VecInit(fromITLB.map(_.bits.paddr)))
  val (tlbRespMiss,  tlbRespMMIO)  = (fromITLB.map(port => port.bits.miss && port.valid), fromITLB.map(port => port.bits.mmio && port.valid))
  val (tlbExcpPF,    tlbExcpAF)    = (fromITLB.map(port => port.bits.excp.pf.instr && port.valid),
                                      fromITLB.map(port => port.bits.excp.af.instr && port.valid)) //TODO: temporarily treat an MMIO req as an access fault

  tlbRespAllValid := tlbRespValid(0) && (tlbRespValid(1) || !f1_doubleLine)

  val f1_pAddrs = tlbRespPAddr //TODO: temporary assignment
  val f1_pTags  = VecInit(f1_pAddrs.map(get_phy_tag(_)))
  val (f1_tags, f1_cacheline_valid, f1_datas) = (meta_resp.tags, meta_resp.valid, data_resp.datas)
  val bank0_hit_vec = VecInit(f1_tags(0).zipWithIndex.map{ case(way_tag, i) => f1_cacheline_valid(0)(i) && way_tag === f1_pTags(0) })
  val bank1_hit_vec = VecInit(f1_tags(1).zipWithIndex.map{ case(way_tag, i) => f1_cacheline_valid(1)(i) && way_tag === f1_pTags(1) })
  val (bank0_hit, bank1_hit) = (ParallelOR(bank0_hit_vec) && !tlbExcpPF(0) && !tlbExcpAF(0), ParallelOR(bank1_hit_vec) && !tlbExcpPF(1) && !tlbExcpAF(1))
  val f1_hit          = (bank0_hit && bank1_hit && f1_valid && f1_doubleLine) || (f1_valid && !f1_doubleLine && bank0_hit)
  val f1_bank_hit_vec = VecInit(Seq(bank0_hit_vec, bank1_hit_vec))
  val f1_bank_hit     = VecInit(Seq(bank0_hit, bank1_hit))

  val replacers = Seq.fill(2)(ReplacementPolicy.fromString(Some("random"), nWays, nSets/2))
  val f1_victim_masks = VecInit(replacers.zipWithIndex.map{case (replacer, i) => UIntToOH(replacer.way(f1_vSetIdx(i)))})

  val touch_sets = Seq.fill(2)(Wire(Vec(2, UInt(log2Ceil(nSets/2).W))))
  val touch_ways = Seq.fill(2)(Wire(Vec(2, Valid(UInt(log2Ceil(nWays).W)))))

  ((replacers zip touch_sets) zip touch_ways).map{case ((r, s), w) => r.access(s, w)}

  val f1_hit_data = VecInit(f1_datas.zipWithIndex.map { case(bank, i) =>
    val bank_hit_data = Mux1H(f1_bank_hit_vec(i).asUInt, bank)
    bank_hit_data
  })

  (0 until nWays).map{ w =>
    XSPerfAccumulate("line_0_hit_way_" + Integer.toString(w, 10), f1_fire && f1_bank_hit(0) && OHToUInt(f1_bank_hit_vec(0)) === w.U)
  }

  (0 until nWays).map{ w =>
    XSPerfAccumulate("line_0_victim_way_" + Integer.toString(w, 10), f1_fire && !f1_bank_hit(0) && OHToUInt(f1_victim_masks(0)) === w.U)
  }

  (0 until nWays).map{ w =>
    XSPerfAccumulate("line_1_hit_way_" + Integer.toString(w, 10), f1_fire && f1_doubleLine && f1_bank_hit(1) && OHToUInt(f1_bank_hit_vec(1)) === w.U)
  }

  (0 until nWays).map{ w =>
    XSPerfAccumulate("line_1_victim_way_" + Integer.toString(w, 10), f1_fire && f1_doubleLine && !f1_bank_hit(1) && OHToUInt(f1_victim_masks(1)) === w.U)
  }

  XSPerfAccumulate("ifu_bubble_f1_tlb_miss",  f1_valid && !tlbRespAllValid )
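  // Illustrative example of the hit check above: with nWays = 4 and
  // bank0_hit_vec = "b0100", Mux1H selects way 2's data into f1_hit_data(0);
  // when no bit of the hit vector is set, the random replacer's one-hot
  // victim mask (f1_victim_masks) names the way the miss queue will refill.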

  //---------------------------------------------
  //  Fetch Stage 3 (F2):
  //  * get data from the previous stage (a hit reads f1_hit_data; a miss waits on the missQueue response)
  //  * if at least one needed cacheline missed, wait for the miss queue response (a wait_state machine) THIS IS TOO UGLY!!!
  //  * cut cacheline(s) and send to PreDecode
  //  * check if the prediction is right (branch target and type, jump direction and type, jal target)
  //---------------------------------------------
  val f2_fetchFinish = Wire(Bool())

  val f2_valid      = RegInit(false.B)
  val f2_ftq_req    = RegEnable(next = f1_ftq_req,    enable = f1_fire)
  val f2_situation  = RegEnable(next = f1_situation,  enable = f1_fire)
  val f2_doubleLine = RegEnable(next = f1_doubleLine, enable = f1_fire)
  val f2_fire       = f2_valid && f2_fetchFinish && f3_ready

  f2_ready := (f3_ready && f2_fetchFinish) || !f2_valid

  when(f2_flush)                  {f2_valid := false.B}
  .elsewhen(f1_fire && !f1_flush) {f2_valid := true.B }
  .elsewhen(f2_fire)              {f2_valid := false.B}

  val pmpExcpAF = fromPMP.map(port => port.instr)

  val f2_pAddrs   = RegEnable(next = f1_pAddrs,   enable = f1_fire)
  val f2_hit      = RegEnable(next = f1_hit,      enable = f1_fire)
  val f2_bank_hit = RegEnable(next = f1_bank_hit, enable = f1_fire)
  val f2_miss     = f2_valid && !f2_hit
  val (f2_vSetIdx, f2_pTags) = (RegEnable(next = f1_vSetIdx, enable = f1_fire), RegEnable(next = f1_pTags, enable = f1_fire))
  val f2_waymask  = RegEnable(next = f1_victim_masks, enable = f1_fire)
  //exception information
  val f2_except_pf = RegEnable(next = VecInit(tlbExcpPF), enable = f1_fire)
  val f2_except_af = VecInit(RegEnable(next = VecInit(tlbExcpAF), enable = f1_fire).zip(pmpExcpAF).map(a => a._1 || DataHoldBypass(a._2, RegNext(f1_fire)).asBool))
  val f2_except    = VecInit((0 until 2).map{i => f2_except_pf(i) || f2_except_af(i)})
  val f2_has_except = f2_valid && (f2_except_af.reduce(_||_) || f2_except_pf.reduce(_||_))

  io.pmp.zipWithIndex.map { case (p, i) =>
    p.req.valid     := f2_fire
    p.req.bits.addr := f2_pAddrs(i)
    p.req.bits.size := 3.U // TODO
    p.req.bits.cmd  := TlbCmd.exec
  }

  // wait states for the miss handling below
  val wait_idle :: wait_queue_ready :: wait_send_req :: wait_two_resp :: wait_0_resp :: wait_1_resp :: wait_one_resp :: wait_finish :: Nil = Enum(8)
  val wait_state = RegInit(wait_idle)

  fromMissQueue.map{port => port.ready := true.B}

  val (miss0_resp, miss1_resp) = (fromMissQueue(0).fire(), fromMissQueue(1).fire())
  val (bank0_fix,  bank1_fix)  = (miss0_resp && !f2_bank_hit(0), miss1_resp && f2_doubleLine && !f2_bank_hit(1))
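  // A fetch touches one or two cache lines, and each line can hit, miss, or
  // fault, which yields the named cases below: e.g. hit_0_miss_1 means line 0
  // hit while line 1 missed (double-line fetch only). Every case containing a
  // miss drives the wait_state machine that follows.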
  val only_0_miss = f2_valid && !f2_hit && !f2_doubleLine && !f2_has_except
  val only_0_hit  = f2_valid && f2_hit && !f2_doubleLine
  val hit_0_hit_1 = f2_valid && f2_hit && f2_doubleLine
  val (hit_0_miss_1, miss_0_hit_1, miss_0_miss_1) = ((f2_valid && !f2_bank_hit(1) && f2_bank_hit(0) && f2_doubleLine  && !f2_has_except),
                                                     (f2_valid && !f2_bank_hit(0) && f2_bank_hit(1) && f2_doubleLine  && !f2_has_except),
                                                     (f2_valid && !f2_bank_hit(0) && !f2_bank_hit(1) && f2_doubleLine && !f2_has_except))

  val hit_0_except_1  = f2_valid && f2_doubleLine && !f2_except(0) && f2_except(1) && f2_bank_hit(0)
  val miss_0_except_1 = f2_valid && f2_doubleLine && !f2_except(0) && f2_except(1) && !f2_bank_hit(0)
  //val fetch0_except_1 = hit_0_except_1 || miss_0_except_1
  val except_0        = f2_valid && f2_except(0)

  val f2_mq_datas = Reg(Vec(2, UInt(blockBits.W)))

  when(fromMissQueue(0).fire) {f2_mq_datas(0) := fromMissQueue(0).bits.data}
  when(fromMissQueue(1).fire) {f2_mq_datas(1) := fromMissQueue(1).bits.data}

  switch(wait_state){
    is(wait_idle){
      when(miss_0_except_1){
        wait_state := Mux(toMissQueue(0).ready, wait_queue_ready, wait_idle)
      }.elsewhen(only_0_miss || miss_0_hit_1){
        wait_state := Mux(toMissQueue(0).ready, wait_queue_ready, wait_idle)
      }.elsewhen(hit_0_miss_1){
        wait_state := Mux(toMissQueue(1).ready, wait_queue_ready, wait_idle)
      }.elsewhen(miss_0_miss_1){
        wait_state := Mux(toMissQueue(0).ready && toMissQueue(1).ready, wait_queue_ready, wait_idle)
      }
    }

    //TODO: naive logic for waiting on the icache response
    is(wait_queue_ready){
      wait_state := wait_send_req
    }

    is(wait_send_req) {
      when(miss_0_except_1 || only_0_miss || hit_0_miss_1 || miss_0_hit_1){
        wait_state := wait_one_resp
      }.elsewhen(miss_0_miss_1){
        wait_state := wait_two_resp
      }
    }

    is(wait_one_resp) {
      when((miss_0_except_1 || only_0_miss || miss_0_hit_1) && fromMissQueue(0).fire()){
        wait_state := wait_finish
      }.elsewhen(hit_0_miss_1 && fromMissQueue(1).fire()){
        wait_state := wait_finish
      }
    }

    is(wait_two_resp) {
      when(fromMissQueue(0).fire() && fromMissQueue(1).fire()){
        wait_state := wait_finish
      }.elsewhen(!fromMissQueue(0).fire() && fromMissQueue(1).fire()){
        wait_state := wait_0_resp
      }.elsewhen(fromMissQueue(0).fire() && !fromMissQueue(1).fire()){
        wait_state := wait_1_resp
      }
    }

    is(wait_0_resp) {
      when(fromMissQueue(0).fire()){
        wait_state := wait_finish
      }
    }

    is(wait_1_resp) {
      when(fromMissQueue(1).fire()){
        wait_state := wait_finish
      }
    }

    is(wait_finish) {
      when(f2_fire) {wait_state := wait_idle}
    }
  }

  when(f2_flush) { wait_state := wait_idle }

  (0 until 2).map { i =>
    if(i == 1) toMissQueue(i).valid := (hit_0_miss_1 || miss_0_miss_1) && wait_state === wait_queue_ready
    else       toMissQueue(i).valid := (only_0_miss || miss_0_hit_1 || miss_0_miss_1) && wait_state === wait_queue_ready
    toMissQueue(i).bits.addr     := f2_pAddrs(i)
    toMissQueue(i).bits.vSetIdx  := f2_vSetIdx(i)
    toMissQueue(i).bits.waymask  := f2_waymask(i)
    toMissQueue(i).bits.clientID := 0.U
  }

  val miss_all_fix = (wait_state === wait_finish)

  f2_fetchFinish := ((f2_valid && f2_hit) || miss_all_fix || hit_0_except_1 || except_0)

  XSPerfAccumulate("ifu_bubble_f2_miss",  f2_valid && !f2_fetchFinish )

  (touch_ways zip touch_sets).zipWithIndex.map{ case((t_w, t_s), i) =>
    t_s(0)       := f1_vSetIdx(i)
    t_w(0).valid := f1_bank_hit(i)
    t_w(0).bits  := OHToUInt(f1_bank_hit_vec(i))

    t_s(1)       := f2_vSetIdx(i)
    t_w(1).valid := f2_valid && !f2_bank_hit(i)
    t_w(1).bits  := OHToUInt(f2_waymask(i))
  }

  val sec_miss_reg = RegInit(0.U.asTypeOf(Vec(4, Bool())))
  val reservedRefillData = Reg(Vec(2, UInt(blockBits.W)))
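  // Secondary-miss forwarding: when the request now in f1 wants a line that
  // f2 has just refilled (whose SRAM write may not be visible yet), the
  // refill data is parked in reservedRefillData and sec_miss_reg records
  // which f2 line feeds which f1 line, so f2_datas below can bypass the saved
  // data instead of missing again.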
  val f2_hit_datas = RegEnable(next = f1_hit_data, enable = f1_fire)
  val f2_datas     = Wire(Vec(2, UInt(blockBits.W)))

  f2_datas.zipWithIndex.map{case(bank, i) =>
    if(i == 0) bank := Mux(f2_bank_hit(i), f2_hit_datas(i), Mux(sec_miss_reg(2), reservedRefillData(1), Mux(sec_miss_reg(0), reservedRefillData(0), f2_mq_datas(i))))
    else       bank := Mux(f2_bank_hit(i), f2_hit_datas(i), Mux(sec_miss_reg(3), reservedRefillData(1), Mux(sec_miss_reg(1), reservedRefillData(0), f2_mq_datas(i))))
  }

  val f2_jump_valids      = Fill(PredictWidth, !preDecoderOut.cfiOffset.valid) | Fill(PredictWidth, 1.U(1.W)) >> (~preDecoderOut.cfiOffset.bits)
  val f2_predecode_valids = VecInit(preDecoderOut.pd.map(instr => instr.valid)).asUInt & f2_jump_valids

  // slice the fetch window out of the (possibly double-line) cacheline data,
  // starting at the in-line offset of `start`
  def cut(cacheline: UInt, start: UInt): Vec[UInt] = {
    if(HasCExtension){
      val result   = Wire(Vec(PredictWidth + 1, UInt(16.W)))
      val dataVec  = cacheline.asTypeOf(Vec(blockBytes * 2 / 2, UInt(16.W)))
      val startPtr = Cat(0.U(1.W), start(blockOffBits-1, 1))
      (0 until PredictWidth + 1).foreach( i =>
        result(i) := dataVec(startPtr + i.U)
      )
      result
    } else {
      val result   = Wire(Vec(PredictWidth, UInt(32.W)))
      val dataVec  = cacheline.asTypeOf(Vec(blockBytes * 2 / 4, UInt(32.W)))
      val startPtr = Cat(0.U(1.W), start(blockOffBits-1, 2))
      (0 until PredictWidth).foreach( i =>
        result(i) := dataVec(startPtr + i.U)
      )
      result
    }
  }

  val f2_cut_data = cut(Cat(f2_datas.map(cacheline => cacheline.asUInt).reverse).asUInt, f2_ftq_req.startAddr)

  // deal with a secondary miss in f1
  val f2_0_f1_0 = ((f2_valid && !f2_bank_hit(0)) && f1_valid && (get_block_addr(f2_ftq_req.startAddr) === get_block_addr(f1_ftq_req.startAddr)))
  val f2_0_f1_1 = ((f2_valid && !f2_bank_hit(0)) && f1_valid && f1_doubleLine && (get_block_addr(f2_ftq_req.startAddr) === get_block_addr(f1_ftq_req.startAddr + blockBytes.U)))
  val f2_1_f1_0 = ((f2_valid && !f2_bank_hit(1) && f2_doubleLine) && f1_valid && (get_block_addr(f2_ftq_req.startAddr + blockBytes.U) === get_block_addr(f1_ftq_req.startAddr)))
  val f2_1_f1_1 = ((f2_valid && !f2_bank_hit(1) && f2_doubleLine) && f1_valid && f1_doubleLine && (get_block_addr(f2_ftq_req.startAddr + blockBytes.U) === get_block_addr(f1_ftq_req.startAddr + blockBytes.U)))

  val isSameLine   = f2_0_f1_0 || f2_0_f1_1 || f2_1_f1_0 || f2_1_f1_1
  val sec_miss_sit = VecInit(Seq(f2_0_f1_0, f2_0_f1_1, f2_1_f1_0, f2_1_f1_1))
  val hasSecMiss   = RegInit(false.B)

  when(f2_flush){
    sec_miss_reg.map(sig => sig := false.B)
    hasSecMiss := false.B
  }.elsewhen(isSameLine && !f1_flush && f2_fire){
    sec_miss_reg.zipWithIndex.map{case(sig, i) => sig := sec_miss_sit(i)}
    hasSecMiss := true.B
  }.elsewhen((!isSameLine || f1_flush) && hasSecMiss && f2_fire){
    sec_miss_reg.map(sig => sig := false.B)
    hasSecMiss := false.B
  }

  when((f2_0_f1_0 || f2_0_f1_1) && f2_fire){
    reservedRefillData(0) := f2_mq_datas(0)
  }

  when((f2_1_f1_0 || f2_1_f1_1) && f2_fire){
    reservedRefillData(1) := f2_mq_datas(1)
  }
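  // Worked example for cut() above (illustrative, C extension enabled,
  // 64-byte blocks): the two lines are concatenated into a 128-byte window of
  // 16-bit parcels; for a startAddr with in-line byte offset 0x02, startPtr
  // is half-word index 1 and result(i) = dataVec(1 + i). PredictWidth + 1
  // parcels are taken so PreDecode can also see the upper half of an RVI
  // instruction that straddles the end of the fetch window.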

  //---------------------------------------------
  //  Fetch Stage 4 (F3):
  //  * get the cut data and exception info from the previous stage
  //  * send it to PreDecode and check whether the prediction is right
  //    (branch target and type, jump direction and type, jal target)
  //  * send instructions to the IBuffer and write the predecode result back to the FTQ
  //---------------------------------------------
  val f3_valid      = RegInit(false.B)
  val f3_ftq_req    = RegEnable(next = f2_ftq_req,    enable = f2_fire)
  val f3_situation  = RegEnable(next = f2_situation,  enable = f2_fire)
  val f3_doubleLine = RegEnable(next = f2_doubleLine, enable = f2_fire)
  val f3_fire       = io.toIbuffer.fire()

  when(f3_flush)                  {f3_valid := false.B}
  .elsewhen(f2_fire && !f2_flush) {f3_valid := true.B }
  .elsewhen(io.toIbuffer.fire())  {f3_valid := false.B}

  f3_ready := io.toIbuffer.ready || !f3_valid

  val f3_cut_data  = RegEnable(next = f2_cut_data,  enable = f2_fire)
  val f3_except_pf = RegEnable(next = f2_except_pf, enable = f2_fire)
  val f3_except_af = RegEnable(next = f2_except_af, enable = f2_fire)
  val f3_hit       = RegEnable(next = f2_hit,       enable = f2_fire)

  val f3_lastHalf      = RegInit(0.U.asTypeOf(new LastHalfInfo))
  val f3_lastHalfMatch = f3_lastHalf.matchThisBlock(f3_ftq_req.startAddr)
  val f3_except        = VecInit((0 until 2).map{i => f3_except_pf(i) || f3_except_af(i)})
  val f3_has_except    = f3_valid && (f3_except_af.reduce(_||_) || f3_except_pf.reduce(_||_))

  //performance counters
  val f3_only_0_hit    = RegEnable(next = only_0_hit,    enable = f2_fire)
  val f3_only_0_miss   = RegEnable(next = only_0_miss,   enable = f2_fire)
  val f3_hit_0_hit_1   = RegEnable(next = hit_0_hit_1,   enable = f2_fire)
  val f3_hit_0_miss_1  = RegEnable(next = hit_0_miss_1,  enable = f2_fire)
  val f3_miss_0_hit_1  = RegEnable(next = miss_0_hit_1,  enable = f2_fire)
  val f3_miss_0_miss_1 = RegEnable(next = miss_0_miss_1, enable = f2_fire)

  val f3_bank_hit = RegEnable(next = f2_bank_hit, enable = f2_fire)
  val f3_req_0    = io.toIbuffer.fire()
  val f3_req_1    = io.toIbuffer.fire() && f3_doubleLine
  val f3_hit_0    = io.toIbuffer.fire() && f3_bank_hit(0)
  val f3_hit_1    = io.toIbuffer.fire() && f3_doubleLine && f3_bank_hit(1)


  preDecoderIn.instValid     := f3_valid && !f3_has_except
  preDecoderIn.data          := f3_cut_data
  preDecoderIn.startAddr     := f3_ftq_req.startAddr
  preDecoderIn.fallThruAddr  := f3_ftq_req.fallThruAddr
  preDecoderIn.fallThruError := f3_ftq_req.fallThruError
  preDecoderIn.isDoubleLine  := f3_doubleLine
  preDecoderIn.ftqOffset     := f3_ftq_req.ftqOffset
  preDecoderIn.target        := f3_ftq_req.target
  preDecoderIn.oversize      := f3_ftq_req.oversize
  preDecoderIn.lastHalfMatch := f3_lastHalfMatch
  preDecoderIn.pageFault     := f3_except_pf
  preDecoderIn.accessFault   := f3_except_af
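  // PreDecode combinationally re-derives instruction boundaries and CFI
  // properties from the raw bytes and compares them against the BPU's claims
  // for this packet; any mismatch is reported through misOffset and becomes
  // the f3_redirect / pdWb writeback driven below.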

  // TODO: what if the next packet does not match?
  when (f3_flush) {
    f3_lastHalf.valid := false.B
  }.elsewhen (io.toIbuffer.fire()) {
    f3_lastHalf.valid    := preDecoderOut.hasLastHalf
    f3_lastHalf.middlePC := preDecoderOut.realEndPC
  }

  val f3_predecode_range = VecInit(preDecoderOut.pd.map(inst => inst.valid)).asUInt

  io.toIbuffer.valid          := f3_valid
  io.toIbuffer.bits.instrs    := preDecoderOut.instrs
  io.toIbuffer.bits.valid     := f3_predecode_range & preDecoderOut.instrRange.asUInt
  io.toIbuffer.bits.pd        := preDecoderOut.pd
  io.toIbuffer.bits.ftqPtr    := f3_ftq_req.ftqIdx
  io.toIbuffer.bits.pc        := preDecoderOut.pc
  io.toIbuffer.bits.ftqOffset.zipWithIndex.map{case(a, i) => a.bits := i.U; a.valid := preDecoderOut.takens(i)}
  io.toIbuffer.bits.foldpc    := preDecoderOut.pc.map(i => XORFold(i(VAddrBits-1, 1), MemPredPCWidth))
  io.toIbuffer.bits.ipf       := preDecoderOut.pageFault
  io.toIbuffer.bits.acf       := preDecoderOut.accessFault
  io.toIbuffer.bits.crossPageIPFFix := preDecoderOut.crossPageIPF

  //write back to the FTQ
  // finishFetchMaskReg masks out packets that have already written back:
  // a packet writes back only in its first cycle in f3
  val finishFetchMaskReg = RegNext(f3_valid && !(f2_fire && !f2_flush))

  toFtq.pdWb.valid           := !finishFetchMaskReg && f3_valid
  toFtq.pdWb.bits.pc         := preDecoderOut.pc
  toFtq.pdWb.bits.pd         := preDecoderOut.pd
  toFtq.pdWb.bits.pd.zipWithIndex.map{case(instr, i) => instr.valid := f3_predecode_range(i)}
  toFtq.pdWb.bits.ftqIdx     := f3_ftq_req.ftqIdx
  toFtq.pdWb.bits.ftqOffset  := f3_ftq_req.ftqOffset.bits
  toFtq.pdWb.bits.misOffset  := preDecoderOut.misOffset
  toFtq.pdWb.bits.cfiOffset  := preDecoderOut.cfiOffset
  toFtq.pdWb.bits.target     := preDecoderOut.target
  toFtq.pdWb.bits.jalTarget  := preDecoderOut.jalTarget
  toFtq.pdWb.bits.instrRange := preDecoderOut.instrRange

  val predecodeFlush    = preDecoderOut.misOffset.valid && f3_valid
  val predecodeFlushReg = RegNext(predecodeFlush && !(f2_fire && !f2_flush))

  // redirect only on the first cycle a mispredecode is observed
  f3_redirect := !predecodeFlushReg && predecodeFlush

  XSPerfAccumulate("ifu_req",   io.toIbuffer.fire() )
  XSPerfAccumulate("ifu_miss",  io.toIbuffer.fire() && !f3_hit )
  XSPerfAccumulate("ifu_req_cacheline_0", f3_req_0 )
  XSPerfAccumulate("ifu_req_cacheline_1", f3_req_1 )
  XSPerfAccumulate("ifu_req_cacheline_0_hit", f3_hit_0 )
  XSPerfAccumulate("ifu_req_cacheline_1_hit", f3_hit_1 )
  XSPerfAccumulate("frontendFlush", f3_redirect )
  XSPerfAccumulate("only_0_hit",    f3_only_0_hit    && io.toIbuffer.fire() )
  XSPerfAccumulate("only_0_miss",   f3_only_0_miss   && io.toIbuffer.fire() )
  XSPerfAccumulate("hit_0_hit_1",   f3_hit_0_hit_1   && io.toIbuffer.fire() )
  XSPerfAccumulate("hit_0_miss_1",  f3_hit_0_miss_1  && io.toIbuffer.fire() )
  XSPerfAccumulate("miss_0_hit_1",  f3_miss_0_hit_1  && io.toIbuffer.fire() )
  XSPerfAccumulate("miss_0_miss_1", f3_miss_0_miss_1 && io.toIbuffer.fire() )
  XSPerfAccumulate("cross_line_block", io.toIbuffer.fire() && f3_situation(0) )
  XSPerfAccumulate("fall_through_is_cacheline_end", io.toIbuffer.fire() && f3_situation(1) )
}