1/*************************************************************************************** 2* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences 3* Copyright (c) 2020-2021 Peng Cheng Laboratory 4* 5* XiangShan is licensed under Mulan PSL v2. 6* You can use this software according to the terms and conditions of the Mulan PSL v2. 7* You may obtain a copy of Mulan PSL v2 at: 8* http://license.coscl.org.cn/MulanPSL2 9* 10* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, 11* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, 12* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. 13* 14* See the Mulan PSL v2 for more details. 15***************************************************************************************/ 16 17package xiangshan.frontend 18 19import org.chipsalliance.cde.config.Parameters 20import chisel3._ 21import chisel3.util._ 22import freechips.rocketchip.rocket.RVCDecoder 23import xiangshan._ 24import xiangshan.cache.mmu._ 25import xiangshan.frontend.icache._ 26import utils._ 27import utility._ 28import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle} 29import utility.ChiselDB 30 31trait HasInstrMMIOConst extends HasXSParameter with HasIFUConst{ 32 def mmioBusWidth = 64 33 def mmioBusBytes = mmioBusWidth / 8 34 def maxInstrLen = 32 35} 36 37trait HasIFUConst extends HasXSParameter{ 38 def addrAlign(addr: UInt, bytes: Int, highest: Int): UInt = Cat(addr(highest-1, log2Ceil(bytes)), 0.U(log2Ceil(bytes).W)) 39 def fetchQueueSize = 2 40 41 def getBasicBlockIdx( pc: UInt, start: UInt ): UInt = { 42 val byteOffset = pc - start 43 (byteOffset - instBytes.U)(log2Ceil(PredictWidth),instOffsetBits) 44 } 45} 46 47class IfuToFtqIO(implicit p:Parameters) extends XSBundle { 48 val pdWb = Valid(new PredecodeWritebackBundle) 49} 50 51class FtqInterface(implicit p: Parameters) extends XSBundle { 52 val fromFtq = Flipped(new FtqToIfuIO) 53 val toFtq = new IfuToFtqIO 54} 55 56class UncacheInterface(implicit p: Parameters) extends XSBundle { 57 val fromUncache = Flipped(DecoupledIO(new InsUncacheResp)) 58 val toUncache = DecoupledIO( new InsUncacheReq ) 59} 60 61class NewIFUIO(implicit p: Parameters) extends XSBundle { 62 val ftqInter = new FtqInterface 63 val icacheInter = Flipped(new IFUICacheIO) 64 val icacheStop = Output(Bool()) 65 val icachePerfInfo = Input(new ICachePerfInfo) 66 val toIbuffer = Decoupled(new FetchToIBuffer) 67 val uncacheInter = new UncacheInterface 68 val frontendTrigger = Flipped(new FrontendTdataDistributeIO) 69 val csrTriggerEnable = Input(Vec(4, Bool())) 70 val rob_commits = Flipped(Vec(CommitWidth, Valid(new RobCommitInfo))) 71 val iTLBInter = new TlbRequestIO 72 val pmp = new ICachePMPBundle 73 val mmioCommitRead = new mmioCommitRead 74} 75 76// record the situation in which fallThruAddr falls into 77// the middle of an RVI inst 78class LastHalfInfo(implicit p: Parameters) extends XSBundle { 79 val valid = Bool() 80 val middlePC = UInt(VAddrBits.W) 81 def matchThisBlock(startAddr: UInt) = valid && middlePC === startAddr 82} 83 84class IfuToPreDecode(implicit p: Parameters) extends XSBundle { 85 val data = if(HasCExtension) Vec(PredictWidth + 1, UInt(16.W)) else Vec(PredictWidth, UInt(32.W)) 86 val frontendTrigger = new FrontendTdataDistributeIO 87 val csrTriggerEnable = Vec(4, Bool()) 88 val pc = Vec(PredictWidth, UInt(VAddrBits.W)) 89} 90 91 92class IfuToPredChecker(implicit p: Parameters) extends XSBundle { 93 val ftqOffset = Valid(UInt(log2Ceil(PredictWidth).W)) 94 val jumpOffset = Vec(PredictWidth, UInt(XLEN.W)) 95 val target = UInt(VAddrBits.W) 96 val instrRange = Vec(PredictWidth, Bool()) 97 val instrValid = Vec(PredictWidth, Bool()) 98 val pds = Vec(PredictWidth, new PreDecodeInfo) 99 val pc = Vec(PredictWidth, UInt(VAddrBits.W)) 100} 101 102class FetchToIBufferDB extends Bundle { 103 val start_addr = UInt(39.W) 104 val instr_count = UInt(32.W) 105 val exception = Bool() 106 val is_cache_hit = Bool() 107} 108 109class IfuWbToFtqDB extends Bundle { 110 val start_addr = UInt(39.W) 111 val is_miss_pred = Bool() 112 val miss_pred_offset = UInt(32.W) 113 val checkJalFault = Bool() 114 val checkRetFault = Bool() 115 val checkTargetFault = Bool() 116 val checkNotCFIFault = Bool() 117 val checkInvalidTaken = Bool() 118} 119 120class NewIFU(implicit p: Parameters) extends XSModule 121 with HasICacheParameters 122 with HasIFUConst 123 with HasPdConst 124 with HasCircularQueuePtrHelper 125 with HasPerfEvents 126{ 127 val io = IO(new NewIFUIO) 128 val (toFtq, fromFtq) = (io.ftqInter.toFtq, io.ftqInter.fromFtq) 129 val fromICache = io.icacheInter.resp 130 val (toUncache, fromUncache) = (io.uncacheInter.toUncache , io.uncacheInter.fromUncache) 131 132 def isCrossLineReq(start: UInt, end: UInt): Bool = start(blockOffBits) ^ end(blockOffBits) 133 134 def isLastInCacheline(addr: UInt): Bool = addr(blockOffBits - 1, 1) === 0.U 135 136 def numOfStage = 3 137 require(numOfStage > 1, "BPU numOfStage must be greater than 1") 138 val topdown_stages = RegInit(VecInit(Seq.fill(numOfStage)(0.U.asTypeOf(new FrontendTopDownBundle)))) 139 // bubble events in IFU, only happen in stage 1 140 val icacheMissBubble = Wire(Bool()) 141 val itlbMissBubble =Wire(Bool()) 142 143 // only driven by clock, not valid-ready 144 topdown_stages(0) := fromFtq.req.bits.topdown_info 145 for (i <- 1 until numOfStage) { 146 topdown_stages(i) := topdown_stages(i - 1) 147 } 148 when (icacheMissBubble) { 149 topdown_stages(1).reasons(TopDownCounters.ICacheMissBubble.id) := true.B 150 } 151 when (itlbMissBubble) { 152 topdown_stages(1).reasons(TopDownCounters.ITLBMissBubble.id) := true.B 153 } 154 io.toIbuffer.bits.topdown_info := topdown_stages(numOfStage - 1) 155 when (fromFtq.topdown_redirect.valid) { 156 // only redirect from backend, IFU redirect itself is handled elsewhere 157 when (fromFtq.topdown_redirect.bits.debugIsCtrl) { 158 /* 159 for (i <- 0 until numOfStage) { 160 topdown_stages(i).reasons(TopDownCounters.ControlRedirectBubble.id) := true.B 161 } 162 io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.ControlRedirectBubble.id) := true.B 163 */ 164 when (fromFtq.topdown_redirect.bits.ControlBTBMissBubble) { 165 for (i <- 0 until numOfStage) { 166 topdown_stages(i).reasons(TopDownCounters.BTBMissBubble.id) := true.B 167 } 168 io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.BTBMissBubble.id) := true.B 169 } .elsewhen (fromFtq.topdown_redirect.bits.TAGEMissBubble) { 170 for (i <- 0 until numOfStage) { 171 topdown_stages(i).reasons(TopDownCounters.TAGEMissBubble.id) := true.B 172 } 173 io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.TAGEMissBubble.id) := true.B 174 } .elsewhen (fromFtq.topdown_redirect.bits.SCMissBubble) { 175 for (i <- 0 until numOfStage) { 176 topdown_stages(i).reasons(TopDownCounters.SCMissBubble.id) := true.B 177 } 178 io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.SCMissBubble.id) := true.B 179 } .elsewhen (fromFtq.topdown_redirect.bits.ITTAGEMissBubble) { 180 for (i <- 0 until numOfStage) { 181 topdown_stages(i).reasons(TopDownCounters.ITTAGEMissBubble.id) := true.B 182 } 183 io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.ITTAGEMissBubble.id) := true.B 184 } .elsewhen (fromFtq.topdown_redirect.bits.RASMissBubble) { 185 for (i <- 0 until numOfStage) { 186 topdown_stages(i).reasons(TopDownCounters.RASMissBubble.id) := true.B 187 } 188 io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.RASMissBubble.id) := true.B 189 } 190 } .elsewhen (fromFtq.topdown_redirect.bits.debugIsMemVio) { 191 for (i <- 0 until numOfStage) { 192 topdown_stages(i).reasons(TopDownCounters.MemVioRedirectBubble.id) := true.B 193 } 194 io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.MemVioRedirectBubble.id) := true.B 195 } .otherwise { 196 for (i <- 0 until numOfStage) { 197 topdown_stages(i).reasons(TopDownCounters.OtherRedirectBubble.id) := true.B 198 } 199 io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.OtherRedirectBubble.id) := true.B 200 } 201 } 202 203 class TlbExept(implicit p: Parameters) extends XSBundle{ 204 val pageFault = Bool() 205 val accessFault = Bool() 206 val mmio = Bool() 207 } 208 209 val preDecoders = Seq.fill(4){ Module(new PreDecode) } 210 211 val predChecker = Module(new PredChecker) 212 val frontendTrigger = Module(new FrontendTrigger) 213 val (checkerIn, checkerOutStage1, checkerOutStage2) = (predChecker.io.in, predChecker.io.out.stage1Out,predChecker.io.out.stage2Out) 214 215 io.iTLBInter.req_kill := false.B 216 io.iTLBInter.resp.ready := true.B 217 218 /** 219 ****************************************************************************** 220 * IFU Stage 0 221 * - send cacheline fetch request to ICacheMainPipe 222 ****************************************************************************** 223 */ 224 225 val f0_valid = fromFtq.req.valid 226 val f0_ftq_req = fromFtq.req.bits 227 val f0_doubleLine = fromFtq.req.bits.crossCacheline 228 val f0_vSetIdx = VecInit(get_idx((f0_ftq_req.startAddr)), get_idx(f0_ftq_req.nextlineStart)) 229 val f0_fire = fromFtq.req.fire 230 231 val f0_flush, f1_flush, f2_flush, f3_flush = WireInit(false.B) 232 val from_bpu_f0_flush, from_bpu_f1_flush, from_bpu_f2_flush, from_bpu_f3_flush = WireInit(false.B) 233 234 from_bpu_f0_flush := fromFtq.flushFromBpu.shouldFlushByStage2(f0_ftq_req.ftqIdx) || 235 fromFtq.flushFromBpu.shouldFlushByStage3(f0_ftq_req.ftqIdx) 236 237 val wb_redirect , mmio_redirect, backend_redirect= WireInit(false.B) 238 val f3_wb_not_flush = WireInit(false.B) 239 240 backend_redirect := fromFtq.redirect.valid 241 f3_flush := backend_redirect || (wb_redirect && !f3_wb_not_flush) 242 f2_flush := backend_redirect || mmio_redirect || wb_redirect 243 f1_flush := f2_flush || from_bpu_f1_flush 244 f0_flush := f1_flush || from_bpu_f0_flush 245 246 val f1_ready, f2_ready, f3_ready = WireInit(false.B) 247 248 fromFtq.req.ready := f1_ready && io.icacheInter.icacheReady 249 250 251 when (wb_redirect) { 252 when (f3_wb_not_flush) { 253 topdown_stages(2).reasons(TopDownCounters.BTBMissBubble.id) := true.B 254 } 255 for (i <- 0 until numOfStage - 1) { 256 topdown_stages(i).reasons(TopDownCounters.BTBMissBubble.id) := true.B 257 } 258 } 259 260 /** <PERF> f0 fetch bubble */ 261 262 XSPerfAccumulate("fetch_bubble_ftq_not_valid", !fromFtq.req.valid && fromFtq.req.ready ) 263 // XSPerfAccumulate("fetch_bubble_pipe_stall", f0_valid && toICache(0).ready && toICache(1).ready && !f1_ready ) 264 // XSPerfAccumulate("fetch_bubble_icache_0_busy", f0_valid && !toICache(0).ready ) 265 // XSPerfAccumulate("fetch_bubble_icache_1_busy", f0_valid && !toICache(1).ready ) 266 XSPerfAccumulate("fetch_flush_backend_redirect", backend_redirect ) 267 XSPerfAccumulate("fetch_flush_wb_redirect", wb_redirect ) 268 XSPerfAccumulate("fetch_flush_bpu_f1_flush", from_bpu_f1_flush ) 269 XSPerfAccumulate("fetch_flush_bpu_f0_flush", from_bpu_f0_flush ) 270 271 272 /** 273 ****************************************************************************** 274 * IFU Stage 1 275 * - calculate pc/half_pc/cut_ptr for every instruction 276 ****************************************************************************** 277 */ 278 279 val f1_valid = RegInit(false.B) 280 val f1_ftq_req = RegEnable(f0_ftq_req, f0_fire) 281 // val f1_situation = RegEnable(f0_situation, f0_fire) 282 val f1_doubleLine = RegEnable(f0_doubleLine, f0_fire) 283 val f1_vSetIdx = RegEnable(f0_vSetIdx, f0_fire) 284 val f1_fire = f1_valid && f2_ready 285 286 f1_ready := f1_fire || !f1_valid 287 288 from_bpu_f1_flush := fromFtq.flushFromBpu.shouldFlushByStage3(f1_ftq_req.ftqIdx) && f1_valid 289 // from_bpu_f1_flush := false.B 290 291 when(f1_flush) {f1_valid := false.B} 292 .elsewhen(f0_fire && !f0_flush) {f1_valid := true.B} 293 .elsewhen(f1_fire) {f1_valid := false.B} 294 295 val f1_pc = VecInit((0 until PredictWidth).map(i => f1_ftq_req.startAddr + (i * 2).U)) 296 val f1_half_snpc = VecInit((0 until PredictWidth).map(i => f1_ftq_req.startAddr + ((i+2) * 2).U)) 297 val f1_cut_ptr = if(HasCExtension) VecInit((0 until PredictWidth + 1).map(i => Cat(0.U(1.W), f1_ftq_req.startAddr(blockOffBits-1, 1)) + i.U )) 298 else VecInit((0 until PredictWidth).map(i => Cat(0.U(1.W), f1_ftq_req.startAddr(blockOffBits-1, 2)) + i.U )) 299 300 /** 301 ****************************************************************************** 302 * IFU Stage 2 303 * - icache response data (latched for pipeline stop) 304 * - generate exceprion bits for every instruciton (page fault/access fault/mmio) 305 * - generate predicted instruction range (1 means this instruciton is in this fetch packet) 306 * - cut data from cachlines to packet instruction code 307 * - instruction predecode and RVC expand 308 ****************************************************************************** 309 */ 310 311 val icacheRespAllValid = WireInit(false.B) 312 313 val f2_valid = RegInit(false.B) 314 val f2_ftq_req = RegEnable(f1_ftq_req, f1_fire) 315 // val f2_situation = RegEnable(f1_situation, f1_fire) 316 val f2_doubleLine = RegEnable(f1_doubleLine, f1_fire) 317 val f2_vSetIdx = RegEnable(f1_vSetIdx, f1_fire) 318 val f2_fire = f2_valid && f3_ready && icacheRespAllValid 319 320 f2_ready := f2_fire || !f2_valid 321 //TODO: addr compare may be timing critical 322 val f2_icache_all_resp_wire = fromICache(0).valid && (fromICache(0).bits.vaddr === f2_ftq_req.startAddr) && ((fromICache(1).valid && (fromICache(1).bits.vaddr === f2_ftq_req.nextlineStart)) || !f2_doubleLine) 323 val f2_icache_all_resp_reg = RegInit(false.B) 324 325 icacheRespAllValid := f2_icache_all_resp_reg || f2_icache_all_resp_wire 326 327 icacheMissBubble := io.icacheInter.topdownIcacheMiss 328 itlbMissBubble := io.icacheInter.topdownItlbMiss 329 330 io.icacheStop := !f3_ready 331 332 when(f2_flush) {f2_icache_all_resp_reg := false.B} 333 .elsewhen(f2_valid && f2_icache_all_resp_wire && !f3_ready) {f2_icache_all_resp_reg := true.B} 334 .elsewhen(f2_fire && f2_icache_all_resp_reg) {f2_icache_all_resp_reg := false.B} 335 336 when(f2_flush) {f2_valid := false.B} 337 .elsewhen(f1_fire && !f1_flush) {f2_valid := true.B } 338 .elsewhen(f2_fire) {f2_valid := false.B} 339 340 // val f2_cache_response_data = ResultHoldBypass(valid = f2_icache_all_resp_wire, data = VecInit(fromICache.map(_.bits.readData))) 341 val f2_cache_response_reg_data = VecInit(fromICache.map(_.bits.registerData)) 342 val f2_cache_response_sram_data = VecInit(fromICache.map(_.bits.sramData)) 343 val f2_cache_response_select = VecInit(fromICache.map(_.bits.select)) 344 345 346 val f2_except_pf = VecInit((0 until PortNumber).map(i => fromICache(i).bits.tlbExcp.pageFault)) 347 val f2_except_af = VecInit((0 until PortNumber).map(i => fromICache(i).bits.tlbExcp.accessFault)) 348 val f2_mmio = fromICache(0).bits.tlbExcp.mmio && !fromICache(0).bits.tlbExcp.accessFault && 349 !fromICache(0).bits.tlbExcp.pageFault 350 351 val f2_pc = RegEnable(f1_pc, f1_fire) 352 val f2_half_snpc = RegEnable(f1_half_snpc, f1_fire) 353 val f2_cut_ptr = RegEnable(f1_cut_ptr, f1_fire) 354 355 val f2_resend_vaddr = RegEnable(f1_ftq_req.startAddr + 2.U, f1_fire) 356 357 def isNextLine(pc: UInt, startAddr: UInt) = { 358 startAddr(blockOffBits) ^ pc(blockOffBits) 359 } 360 361 def isLastInLine(pc: UInt) = { 362 pc(blockOffBits - 1, 0) === "b111110".U 363 } 364 365 val f2_foldpc = VecInit(f2_pc.map(i => XORFold(i(VAddrBits-1,1), MemPredPCWidth))) 366 val f2_jump_range = Fill(PredictWidth, !f2_ftq_req.ftqOffset.valid) | Fill(PredictWidth, 1.U(1.W)) >> ~f2_ftq_req.ftqOffset.bits 367 val f2_ftr_range = Fill(PredictWidth, f2_ftq_req.ftqOffset.valid) | Fill(PredictWidth, 1.U(1.W)) >> ~getBasicBlockIdx(f2_ftq_req.nextStartAddr, f2_ftq_req.startAddr) 368 val f2_instr_range = f2_jump_range & f2_ftr_range 369 val f2_pf_vec = VecInit((0 until PredictWidth).map(i => (!isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_except_pf(0) || isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_doubleLine && f2_except_pf(1)))) 370 val f2_af_vec = VecInit((0 until PredictWidth).map(i => (!isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_except_af(0) || isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_doubleLine && f2_except_af(1)))) 371 372 val f2_paddrs = VecInit((0 until PortNumber).map(i => fromICache(i).bits.paddr)) 373 val f2_perf_info = io.icachePerfInfo 374 375 def cut(cacheline: UInt, cutPtr: Vec[UInt]) : Vec[UInt] ={ 376 require(HasCExtension) 377 // if(HasCExtension){ 378 val partCacheline = cacheline((blockBytes * 8 * 2 * 3) / 4 - 1, 0) 379 val result = Wire(Vec(PredictWidth + 1, UInt(16.W))) 380 val dataVec = cacheline.asTypeOf(Vec(blockBytes * 3 /4, UInt(16.W))) //47 16-bit data vector 381 (0 until PredictWidth + 1).foreach( i => 382 result(i) := dataVec(cutPtr(i)) //the max ptr is 3*blockBytes/4-1 383 ) 384 result 385 // } else { 386 // val result = Wire(Vec(PredictWidth, UInt(32.W)) ) 387 // val dataVec = cacheline.asTypeOf(Vec(blockBytes * 2/ 4, UInt(32.W))) 388 // (0 until PredictWidth).foreach( i => 389 // result(i) := dataVec(cutPtr(i)) 390 // ) 391 // result 392 // } 393 } 394 395 val f2_data_2_cacheline = Wire(Vec(4, UInt((2 * blockBits).W))) 396 f2_data_2_cacheline(0) := Cat(f2_cache_response_reg_data(1) , f2_cache_response_reg_data(0)) 397 f2_data_2_cacheline(1) := Cat(f2_cache_response_reg_data(1) , f2_cache_response_sram_data(0)) 398 f2_data_2_cacheline(2) := Cat(f2_cache_response_sram_data(1) , f2_cache_response_reg_data(0)) 399 f2_data_2_cacheline(3) := Cat(f2_cache_response_sram_data(1) , f2_cache_response_sram_data(0)) 400 401 val f2_cut_data = VecInit(f2_data_2_cacheline.map(data => cut( data, f2_cut_ptr ))) 402 403 val f2_predecod_ptr = Wire(UInt(2.W)) 404 f2_predecod_ptr := Cat(f2_cache_response_select(1),f2_cache_response_select(0)) 405 406 /** predecode (include RVC expander) */ 407 // preDecoderRegIn.data := f2_reg_cut_data 408 // preDecoderRegInIn.frontendTrigger := io.frontendTrigger 409 // preDecoderRegInIn.csrTriggerEnable := io.csrTriggerEnable 410 // preDecoderRegIn.pc := f2_pc 411 412 val preDecoderOut = Mux1H(UIntToOH(f2_predecod_ptr), preDecoders.map(_.io.out)) 413 for(i <- 0 until 4){ 414 val preDecoderIn = preDecoders(i).io.in 415 preDecoderIn.data := f2_cut_data(i) 416 preDecoderIn.frontendTrigger := io.frontendTrigger 417 preDecoderIn.csrTriggerEnable := io.csrTriggerEnable 418 preDecoderIn.pc := f2_pc 419 } 420 421 //val f2_expd_instr = preDecoderOut.expInstr 422 val f2_instr = preDecoderOut.instr 423 val f2_pd = preDecoderOut.pd 424 val f2_jump_offset = preDecoderOut.jumpOffset 425 val f2_hasHalfValid = preDecoderOut.hasHalfValid 426 val f2_crossPageFault = VecInit((0 until PredictWidth).map(i => isLastInLine(f2_pc(i)) && !f2_except_pf(0) && f2_doubleLine && f2_except_pf(1) && !f2_pd(i).isRVC )) 427 428 XSPerfAccumulate("fetch_bubble_icache_not_resp", f2_valid && !icacheRespAllValid ) 429 430 431 /** 432 ****************************************************************************** 433 * IFU Stage 3 434 * - handle MMIO instruciton 435 * -send request to Uncache fetch Unit 436 * -every packet include 1 MMIO instruction 437 * -MMIO instructions will stop fetch pipeline until commiting from RoB 438 * -flush to snpc (send ifu_redirect to Ftq) 439 * - Ibuffer enqueue 440 * - check predict result in Frontend (jalFault/retFault/notCFIFault/invalidTakenFault/targetFault) 441 * - handle last half RVI instruction 442 ****************************************************************************** 443 */ 444 445 val f3_valid = RegInit(false.B) 446 val f3_ftq_req = RegEnable(f2_ftq_req, f2_fire) 447 // val f3_situation = RegEnable(f2_situation, f2_fire) 448 val f3_doubleLine = RegEnable(f2_doubleLine, f2_fire) 449 val f3_fire = io.toIbuffer.fire 450 451 f3_ready := f3_fire || !f3_valid 452 453 val f3_cut_data = RegEnable(f2_cut_data(f2_predecod_ptr), f2_fire) 454 455 val f3_except_pf = RegEnable(f2_except_pf, f2_fire) 456 val f3_except_af = RegEnable(f2_except_af, f2_fire) 457 val f3_mmio = RegEnable(f2_mmio , f2_fire) 458 459 //val f3_expd_instr = RegEnable(f2_expd_instr, f2_fire) 460 val f3_instr = RegEnable(f2_instr, f2_fire) 461 val f3_expd_instr = VecInit((0 until PredictWidth).map{ i => 462 val expander = Module(new RVCExpander) 463 expander.io.in := f3_instr(i) 464 expander.io.out.bits 465 }) 466 467 val f3_pd_wire = RegEnable(f2_pd, f2_fire) 468 val f3_pd = WireInit(f3_pd_wire) 469 val f3_jump_offset = RegEnable(f2_jump_offset, f2_fire) 470 val f3_af_vec = RegEnable(f2_af_vec, f2_fire) 471 val f3_pf_vec = RegEnable(f2_pf_vec , f2_fire) 472 val f3_pc = RegEnable(f2_pc, f2_fire) 473 val f3_half_snpc = RegEnable(f2_half_snpc, f2_fire) 474 val f3_instr_range = RegEnable(f2_instr_range, f2_fire) 475 val f3_foldpc = RegEnable(f2_foldpc, f2_fire) 476 val f3_crossPageFault = RegEnable(f2_crossPageFault, f2_fire) 477 val f3_hasHalfValid = RegEnable(f2_hasHalfValid, f2_fire) 478 val f3_except = VecInit((0 until 2).map{i => f3_except_pf(i) || f3_except_af(i)}) 479 val f3_has_except = f3_valid && (f3_except_af.reduce(_||_) || f3_except_pf.reduce(_||_)) 480 val f3_pAddrs = RegEnable(f2_paddrs, f2_fire) 481 val f3_resend_vaddr = RegEnable(f2_resend_vaddr, f2_fire) 482 483 // Expand 1 bit to prevent overflow when assert 484 val f3_ftq_req_startAddr = Cat(0.U(1.W), f3_ftq_req.startAddr) 485 val f3_ftq_req_nextStartAddr = Cat(0.U(1.W), f3_ftq_req.nextStartAddr) 486 // brType, isCall and isRet generation is delayed to f3 stage 487 val f3Predecoder = Module(new F3Predecoder) 488 489 f3Predecoder.io.in.instr := f3_instr 490 491 f3_pd.zipWithIndex.map{ case (pd,i) => 492 pd.brType := f3Predecoder.io.out.pd(i).brType 493 pd.isCall := f3Predecoder.io.out.pd(i).isCall 494 pd.isRet := f3Predecoder.io.out.pd(i).isRet 495 } 496 497 val f3PdDiff = f3_pd_wire.zip(f3_pd).map{ case (a,b) => a.asUInt =/= b.asUInt }.reduce(_||_) 498 XSError(f3_valid && f3PdDiff, "f3 pd diff") 499 500 when(f3_valid && !f3_ftq_req.ftqOffset.valid){ 501 assert(f3_ftq_req_startAddr + (2*PredictWidth).U >= f3_ftq_req_nextStartAddr, s"More tha ${2*PredictWidth} Bytes fetch is not allowed!") 502 } 503 504 /*** MMIO State Machine***/ 505 val f3_mmio_data = Reg(Vec(2, UInt(16.W))) 506 val mmio_is_RVC = RegInit(false.B) 507 val mmio_resend_addr =RegInit(0.U(PAddrBits.W)) 508 val mmio_resend_af = RegInit(false.B) 509 val mmio_resend_pf = RegInit(false.B) 510 511 //last instuction finish 512 val is_first_instr = RegInit(true.B) 513 io.mmioCommitRead.mmioFtqPtr := RegNext(f3_ftq_req.ftqIdx + 1.U) 514 515 val m_idle :: m_waitLastCmt:: m_sendReq :: m_waitResp :: m_sendTLB :: m_tlbResp :: m_sendPMP :: m_resendReq :: m_waitResendResp :: m_waitCommit :: m_commited :: Nil = Enum(11) 516 val mmio_state = RegInit(m_idle) 517 518 val f3_req_is_mmio = f3_mmio && f3_valid 519 val mmio_commit = VecInit(io.rob_commits.map{commit => commit.valid && commit.bits.ftqIdx === f3_ftq_req.ftqIdx && commit.bits.ftqOffset === 0.U}).asUInt.orR 520 val f3_mmio_req_commit = f3_req_is_mmio && mmio_state === m_commited 521 522 val f3_mmio_to_commit = f3_req_is_mmio && mmio_state === m_waitCommit 523 val f3_mmio_to_commit_next = RegNext(f3_mmio_to_commit) 524 val f3_mmio_can_go = f3_mmio_to_commit && !f3_mmio_to_commit_next 525 526 val fromFtqRedirectReg = RegNext(fromFtq.redirect,init = 0.U.asTypeOf(fromFtq.redirect)) 527 val mmioF3Flush = RegNext(f3_flush,init = false.B) 528 val f3_ftq_flush_self = fromFtqRedirectReg.valid && RedirectLevel.flushItself(fromFtqRedirectReg.bits.level) 529 val f3_ftq_flush_by_older = fromFtqRedirectReg.valid && isBefore(fromFtqRedirectReg.bits.ftqIdx, f3_ftq_req.ftqIdx) 530 531 val f3_need_not_flush = f3_req_is_mmio && fromFtqRedirectReg.valid && !f3_ftq_flush_self && !f3_ftq_flush_by_older 532 533 when(is_first_instr && mmio_commit){ 534 is_first_instr := false.B 535 } 536 537 when(f3_flush && !f3_req_is_mmio) {f3_valid := false.B} 538 .elsewhen(mmioF3Flush && f3_req_is_mmio && !f3_need_not_flush) {f3_valid := false.B} 539 .elsewhen(f2_fire && !f2_flush ) {f3_valid := true.B } 540 .elsewhen(io.toIbuffer.fire && !f3_req_is_mmio) {f3_valid := false.B} 541 .elsewhen{f3_req_is_mmio && f3_mmio_req_commit} {f3_valid := false.B} 542 543 val f3_mmio_use_seq_pc = RegInit(false.B) 544 545 val (redirect_ftqIdx, redirect_ftqOffset) = (fromFtqRedirectReg.bits.ftqIdx,fromFtqRedirectReg.bits.ftqOffset) 546 val redirect_mmio_req = fromFtqRedirectReg.valid && redirect_ftqIdx === f3_ftq_req.ftqIdx && redirect_ftqOffset === 0.U 547 548 when(RegNext(f2_fire && !f2_flush) && f3_req_is_mmio) { f3_mmio_use_seq_pc := true.B } 549 .elsewhen(redirect_mmio_req) { f3_mmio_use_seq_pc := false.B } 550 551 f3_ready := Mux(f3_req_is_mmio, io.toIbuffer.ready && f3_mmio_req_commit || !f3_valid , io.toIbuffer.ready || !f3_valid) 552 553 // mmio state machine 554 switch(mmio_state){ 555 is(m_idle){ 556 when(f3_req_is_mmio){ 557 mmio_state := m_waitLastCmt 558 } 559 } 560 561 is(m_waitLastCmt){ 562 when(is_first_instr){ 563 mmio_state := m_sendReq 564 }.otherwise{ 565 mmio_state := Mux(io.mmioCommitRead.mmioLastCommit, m_sendReq, m_waitLastCmt) 566 } 567 } 568 569 is(m_sendReq){ 570 mmio_state := Mux(toUncache.fire, m_waitResp, m_sendReq ) 571 } 572 573 is(m_waitResp){ 574 when(fromUncache.fire){ 575 val isRVC = fromUncache.bits.data(1,0) =/= 3.U 576 val needResend = !isRVC && f3_pAddrs(0)(2,1) === 3.U 577 mmio_state := Mux(needResend, m_sendTLB , m_waitCommit) 578 579 mmio_is_RVC := isRVC 580 f3_mmio_data(0) := fromUncache.bits.data(15,0) 581 f3_mmio_data(1) := fromUncache.bits.data(31,16) 582 } 583 } 584 585 is(m_sendTLB){ 586 when( io.iTLBInter.req.valid && !io.iTLBInter.resp.bits.miss ){ 587 mmio_state := m_tlbResp 588 } 589 } 590 591 is(m_tlbResp){ 592 val tlbExept = io.iTLBInter.resp.bits.excp(0).pf.instr || 593 io.iTLBInter.resp.bits.excp(0).af.instr 594 mmio_state := Mux(tlbExept,m_waitCommit,m_sendPMP) 595 mmio_resend_addr := io.iTLBInter.resp.bits.paddr(0) 596 mmio_resend_af := mmio_resend_af || io.iTLBInter.resp.bits.excp(0).af.instr 597 mmio_resend_pf := mmio_resend_pf || io.iTLBInter.resp.bits.excp(0).pf.instr 598 } 599 600 is(m_sendPMP){ 601 val pmpExcpAF = io.pmp.resp.instr || !io.pmp.resp.mmio 602 mmio_state := Mux(pmpExcpAF, m_waitCommit , m_resendReq) 603 mmio_resend_af := pmpExcpAF 604 } 605 606 is(m_resendReq){ 607 mmio_state := Mux(toUncache.fire, m_waitResendResp, m_resendReq ) 608 } 609 610 is(m_waitResendResp){ 611 when(fromUncache.fire){ 612 mmio_state := m_waitCommit 613 f3_mmio_data(1) := fromUncache.bits.data(15,0) 614 } 615 } 616 617 is(m_waitCommit){ 618 when(mmio_commit){ 619 mmio_state := m_commited 620 } 621 } 622 623 //normal mmio instruction 624 is(m_commited){ 625 mmio_state := m_idle 626 mmio_is_RVC := false.B 627 mmio_resend_addr := 0.U 628 } 629 } 630 631 //exception or flush by older branch prediction 632 when(f3_ftq_flush_self || f3_ftq_flush_by_older) { 633 mmio_state := m_idle 634 mmio_is_RVC := false.B 635 mmio_resend_addr := 0.U 636 mmio_resend_af := false.B 637 f3_mmio_data.map(_ := 0.U) 638 } 639 640 toUncache.valid := ((mmio_state === m_sendReq) || (mmio_state === m_resendReq)) && f3_req_is_mmio 641 toUncache.bits.addr := Mux((mmio_state === m_resendReq), mmio_resend_addr, f3_pAddrs(0)) 642 fromUncache.ready := true.B 643 644 io.iTLBInter.req.valid := (mmio_state === m_sendTLB) && f3_req_is_mmio 645 io.iTLBInter.req.bits.size := 3.U 646 io.iTLBInter.req.bits.vaddr := f3_resend_vaddr 647 io.iTLBInter.req.bits.debug.pc := f3_resend_vaddr 648 649 io.iTLBInter.req.bits.kill := false.B // IFU use itlb for mmio, doesn't need sync, set it to false 650 io.iTLBInter.req.bits.cmd := TlbCmd.exec 651 io.iTLBInter.req.bits.memidx := DontCare 652 io.iTLBInter.req.bits.debug.robIdx := DontCare 653 io.iTLBInter.req.bits.no_translate := false.B 654 io.iTLBInter.req.bits.debug.isFirstIssue := DontCare 655 656 io.pmp.req.valid := (mmio_state === m_sendPMP) && f3_req_is_mmio 657 io.pmp.req.bits.addr := mmio_resend_addr 658 io.pmp.req.bits.size := 3.U 659 io.pmp.req.bits.cmd := TlbCmd.exec 660 661 val f3_lastHalf = RegInit(0.U.asTypeOf(new LastHalfInfo)) 662 663 val f3_predecode_range = VecInit(preDecoderOut.pd.map(inst => inst.valid)).asUInt 664 val f3_mmio_range = VecInit((0 until PredictWidth).map(i => if(i ==0) true.B else false.B)) 665 val f3_instr_valid = Wire(Vec(PredictWidth, Bool())) 666 667 /*** prediction result check ***/ 668 checkerIn.ftqOffset := f3_ftq_req.ftqOffset 669 checkerIn.jumpOffset := f3_jump_offset 670 checkerIn.target := f3_ftq_req.nextStartAddr 671 checkerIn.instrRange := f3_instr_range.asTypeOf(Vec(PredictWidth, Bool())) 672 checkerIn.instrValid := f3_instr_valid.asTypeOf(Vec(PredictWidth, Bool())) 673 checkerIn.pds := f3_pd 674 checkerIn.pc := f3_pc 675 676 /*** handle half RVI in the last 2 Bytes ***/ 677 678 def hasLastHalf(idx: UInt) = { 679 //!f3_pd(idx).isRVC && checkerOutStage1.fixedRange(idx) && f3_instr_valid(idx) && !checkerOutStage1.fixedTaken(idx) && !checkerOutStage2.fixedMissPred(idx) && ! f3_req_is_mmio 680 !f3_pd(idx).isRVC && checkerOutStage1.fixedRange(idx) && f3_instr_valid(idx) && !checkerOutStage1.fixedTaken(idx) && ! f3_req_is_mmio 681 } 682 683 val f3_last_validIdx = ParallelPosteriorityEncoder(checkerOutStage1.fixedRange) 684 685 val f3_hasLastHalf = hasLastHalf((PredictWidth - 1).U) 686 val f3_false_lastHalf = hasLastHalf(f3_last_validIdx) 687 val f3_false_snpc = f3_half_snpc(f3_last_validIdx) 688 689 val f3_lastHalf_mask = VecInit((0 until PredictWidth).map( i => if(i ==0) false.B else true.B )).asUInt 690 val f3_lastHalf_disable = RegInit(false.B) 691 692 when(f3_flush || (f3_fire && f3_lastHalf_disable)){ 693 f3_lastHalf_disable := false.B 694 } 695 696 when (f3_flush) { 697 f3_lastHalf.valid := false.B 698 }.elsewhen (f3_fire) { 699 f3_lastHalf.valid := f3_hasLastHalf && !f3_lastHalf_disable 700 f3_lastHalf.middlePC := f3_ftq_req.nextStartAddr 701 } 702 703 f3_instr_valid := Mux(f3_lastHalf.valid,f3_hasHalfValid ,VecInit(f3_pd.map(inst => inst.valid))) 704 705 /*** frontend Trigger ***/ 706 frontendTrigger.io.pds := f3_pd 707 frontendTrigger.io.pc := f3_pc 708 frontendTrigger.io.data := f3_cut_data 709 710 frontendTrigger.io.frontendTrigger := io.frontendTrigger 711 frontendTrigger.io.csrTriggerEnable := io.csrTriggerEnable 712 713 val f3_triggered = frontendTrigger.io.triggered 714 715 /*** send to Ibuffer ***/ 716 717 io.toIbuffer.valid := f3_valid && (!f3_req_is_mmio || f3_mmio_can_go) && !f3_flush 718 io.toIbuffer.bits.instrs := f3_expd_instr 719 io.toIbuffer.bits.valid := f3_instr_valid.asUInt 720 io.toIbuffer.bits.enqEnable := checkerOutStage1.fixedRange.asUInt & f3_instr_valid.asUInt 721 io.toIbuffer.bits.pd := f3_pd 722 io.toIbuffer.bits.ftqPtr := f3_ftq_req.ftqIdx 723 io.toIbuffer.bits.pc := f3_pc 724 io.toIbuffer.bits.ftqOffset.zipWithIndex.map{case(a, i) => a.bits := i.U; a.valid := checkerOutStage1.fixedTaken(i) && !f3_req_is_mmio} 725 io.toIbuffer.bits.foldpc := f3_foldpc 726 io.toIbuffer.bits.ipf := VecInit(f3_pf_vec.zip(f3_crossPageFault).map{case (pf, crossPF) => pf || crossPF}) 727 io.toIbuffer.bits.acf := f3_af_vec 728 io.toIbuffer.bits.crossPageIPFFix := f3_crossPageFault 729 io.toIbuffer.bits.triggered := f3_triggered 730 731 when(f3_lastHalf.valid){ 732 io.toIbuffer.bits.enqEnable := checkerOutStage1.fixedRange.asUInt & f3_instr_valid.asUInt & f3_lastHalf_mask 733 io.toIbuffer.bits.valid := f3_lastHalf_mask & f3_instr_valid.asUInt 734 } 735 736 737 738 //Write back to Ftq 739 val f3_cache_fetch = f3_valid && !(f2_fire && !f2_flush) 740 val finishFetchMaskReg = RegNext(f3_cache_fetch) 741 742 val mmioFlushWb = Wire(Valid(new PredecodeWritebackBundle)) 743 val f3_mmio_missOffset = Wire(ValidUndirectioned(UInt(log2Ceil(PredictWidth).W))) 744 f3_mmio_missOffset.valid := f3_req_is_mmio 745 f3_mmio_missOffset.bits := 0.U 746 747 mmioFlushWb.valid := (f3_req_is_mmio && mmio_state === m_waitCommit && RegNext(fromUncache.fire) && f3_mmio_use_seq_pc) 748 mmioFlushWb.bits.pc := f3_pc 749 mmioFlushWb.bits.pd := f3_pd 750 mmioFlushWb.bits.pd.zipWithIndex.map{case(instr,i) => instr.valid := f3_mmio_range(i)} 751 mmioFlushWb.bits.ftqIdx := f3_ftq_req.ftqIdx 752 mmioFlushWb.bits.ftqOffset := f3_ftq_req.ftqOffset.bits 753 mmioFlushWb.bits.misOffset := f3_mmio_missOffset 754 mmioFlushWb.bits.cfiOffset := DontCare 755 mmioFlushWb.bits.target := Mux(mmio_is_RVC, f3_ftq_req.startAddr + 2.U , f3_ftq_req.startAddr + 4.U) 756 mmioFlushWb.bits.jalTarget := DontCare 757 mmioFlushWb.bits.instrRange := f3_mmio_range 758 759 /** external predecode for MMIO instruction */ 760 when(f3_req_is_mmio){ 761 val inst = Cat(f3_mmio_data(1), f3_mmio_data(0)) 762 val currentIsRVC = isRVC(inst) 763 764 val brType::isCall::isRet::Nil = brInfo(inst) 765 val jalOffset = jal_offset(inst, currentIsRVC) 766 val brOffset = br_offset(inst, currentIsRVC) 767 768 io.toIbuffer.bits.instrs(0) := new RVCDecoder(inst, XLEN, useAddiForMv = true).decode.bits 769 770 771 io.toIbuffer.bits.pd(0).valid := true.B 772 io.toIbuffer.bits.pd(0).isRVC := currentIsRVC 773 io.toIbuffer.bits.pd(0).brType := brType 774 io.toIbuffer.bits.pd(0).isCall := isCall 775 io.toIbuffer.bits.pd(0).isRet := isRet 776 777 io.toIbuffer.bits.acf(0) := mmio_resend_af 778 io.toIbuffer.bits.ipf(0) := mmio_resend_pf 779 io.toIbuffer.bits.crossPageIPFFix(0) := mmio_resend_pf 780 781 io.toIbuffer.bits.enqEnable := f3_mmio_range.asUInt 782 783 mmioFlushWb.bits.pd(0).valid := true.B 784 mmioFlushWb.bits.pd(0).isRVC := currentIsRVC 785 mmioFlushWb.bits.pd(0).brType := brType 786 mmioFlushWb.bits.pd(0).isCall := isCall 787 mmioFlushWb.bits.pd(0).isRet := isRet 788 } 789 790 mmio_redirect := (f3_req_is_mmio && mmio_state === m_waitCommit && RegNext(fromUncache.fire) && f3_mmio_use_seq_pc) 791 792 XSPerfAccumulate("fetch_bubble_ibuffer_not_ready", io.toIbuffer.valid && !io.toIbuffer.ready ) 793 794 795 /** 796 ****************************************************************************** 797 * IFU Write Back Stage 798 * - write back predecode information to Ftq to update 799 * - redirect if found fault prediction 800 * - redirect if has false hit last half (last PC is not start + 32 Bytes, but in the midle of an notCFI RVI instruction) 801 ****************************************************************************** 802 */ 803 804 val wb_valid = RegNext(RegNext(f2_fire && !f2_flush) && !f3_req_is_mmio && !f3_flush) 805 val wb_ftq_req = RegNext(f3_ftq_req) 806 807 val wb_check_result_stage1 = RegNext(checkerOutStage1) 808 val wb_check_result_stage2 = checkerOutStage2 809 val wb_instr_range = RegNext(io.toIbuffer.bits.enqEnable) 810 val wb_pc = RegNext(f3_pc) 811 val wb_pd = RegNext(f3_pd) 812 val wb_instr_valid = RegNext(f3_instr_valid) 813 814 /* false hit lastHalf */ 815 val wb_lastIdx = RegNext(f3_last_validIdx) 816 val wb_false_lastHalf = RegNext(f3_false_lastHalf) && wb_lastIdx =/= (PredictWidth - 1).U 817 val wb_false_target = RegNext(f3_false_snpc) 818 819 val wb_half_flush = wb_false_lastHalf 820 val wb_half_target = wb_false_target 821 822 /* false oversize */ 823 val lastIsRVC = wb_instr_range.asTypeOf(Vec(PredictWidth,Bool())).last && wb_pd.last.isRVC 824 val lastIsRVI = wb_instr_range.asTypeOf(Vec(PredictWidth,Bool()))(PredictWidth - 2) && !wb_pd(PredictWidth - 2).isRVC 825 val lastTaken = wb_check_result_stage1.fixedTaken.last 826 827 f3_wb_not_flush := wb_ftq_req.ftqIdx === f3_ftq_req.ftqIdx && f3_valid && wb_valid 828 829 /** if a req with a last half but miss predicted enters in wb stage, and this cycle f3 stalls, 830 * we set a flag to notify f3 that the last half flag need not to be set. 831 */ 832 //f3_fire is after wb_valid 833 when(wb_valid && RegNext(f3_hasLastHalf,init = false.B) 834 && wb_check_result_stage2.fixedMissPred(PredictWidth - 1) && !f3_fire && !RegNext(f3_fire,init = false.B) && !f3_flush 835 ){ 836 f3_lastHalf_disable := true.B 837 } 838 839 //wb_valid and f3_fire are in same cycle 840 when(wb_valid && RegNext(f3_hasLastHalf,init = false.B) 841 && wb_check_result_stage2.fixedMissPred(PredictWidth - 1) && f3_fire 842 ){ 843 f3_lastHalf.valid := false.B 844 } 845 846 val checkFlushWb = Wire(Valid(new PredecodeWritebackBundle)) 847 val checkFlushWbjalTargetIdx = ParallelPriorityEncoder(VecInit(wb_pd.zip(wb_instr_valid).map{case (pd, v) => v && pd.isJal })) 848 val checkFlushWbTargetIdx = ParallelPriorityEncoder(wb_check_result_stage2.fixedMissPred) 849 checkFlushWb.valid := wb_valid 850 checkFlushWb.bits.pc := wb_pc 851 checkFlushWb.bits.pd := wb_pd 852 checkFlushWb.bits.pd.zipWithIndex.map{case(instr,i) => instr.valid := wb_instr_valid(i)} 853 checkFlushWb.bits.ftqIdx := wb_ftq_req.ftqIdx 854 checkFlushWb.bits.ftqOffset := wb_ftq_req.ftqOffset.bits 855 checkFlushWb.bits.misOffset.valid := ParallelOR(wb_check_result_stage2.fixedMissPred) || wb_half_flush 856 checkFlushWb.bits.misOffset.bits := Mux(wb_half_flush, wb_lastIdx, ParallelPriorityEncoder(wb_check_result_stage2.fixedMissPred)) 857 checkFlushWb.bits.cfiOffset.valid := ParallelOR(wb_check_result_stage1.fixedTaken) 858 checkFlushWb.bits.cfiOffset.bits := ParallelPriorityEncoder(wb_check_result_stage1.fixedTaken) 859 checkFlushWb.bits.target := Mux(wb_half_flush, wb_half_target, wb_check_result_stage2.fixedTarget(checkFlushWbTargetIdx)) 860 checkFlushWb.bits.jalTarget := wb_check_result_stage2.jalTarget(checkFlushWbjalTargetIdx) 861 checkFlushWb.bits.instrRange := wb_instr_range.asTypeOf(Vec(PredictWidth, Bool())) 862 863 toFtq.pdWb := Mux(wb_valid, checkFlushWb, mmioFlushWb) 864 865 wb_redirect := checkFlushWb.bits.misOffset.valid && wb_valid 866 867 /*write back flush type*/ 868 val checkFaultType = wb_check_result_stage2.faultType 869 val checkJalFault = wb_valid && checkFaultType.map(_.isjalFault).reduce(_||_) 870 val checkRetFault = wb_valid && checkFaultType.map(_.isRetFault).reduce(_||_) 871 val checkTargetFault = wb_valid && checkFaultType.map(_.istargetFault).reduce(_||_) 872 val checkNotCFIFault = wb_valid && checkFaultType.map(_.notCFIFault).reduce(_||_) 873 val checkInvalidTaken = wb_valid && checkFaultType.map(_.invalidTakenFault).reduce(_||_) 874 875 876 XSPerfAccumulate("predecode_flush_jalFault", checkJalFault ) 877 XSPerfAccumulate("predecode_flush_retFault", checkRetFault ) 878 XSPerfAccumulate("predecode_flush_targetFault", checkTargetFault ) 879 XSPerfAccumulate("predecode_flush_notCFIFault", checkNotCFIFault ) 880 XSPerfAccumulate("predecode_flush_incalidTakenFault", checkInvalidTaken ) 881 882 when(checkRetFault){ 883 XSDebug("startAddr:%x nextstartAddr:%x taken:%d takenIdx:%d\n", 884 wb_ftq_req.startAddr, wb_ftq_req.nextStartAddr, wb_ftq_req.ftqOffset.valid, wb_ftq_req.ftqOffset.bits) 885 } 886 887 888 /** performance counter */ 889 val f3_perf_info = RegEnable(f2_perf_info, f2_fire) 890 val f3_req_0 = io.toIbuffer.fire 891 val f3_req_1 = io.toIbuffer.fire && f3_doubleLine 892 val f3_hit_0 = io.toIbuffer.fire && f3_perf_info.bank_hit(0) 893 val f3_hit_1 = io.toIbuffer.fire && f3_doubleLine & f3_perf_info.bank_hit(1) 894 val f3_hit = f3_perf_info.hit 895 val perfEvents = Seq( 896 ("frontendFlush ", wb_redirect ), 897 ("ifu_req ", io.toIbuffer.fire ), 898 ("ifu_miss ", io.toIbuffer.fire && !f3_perf_info.hit ), 899 ("ifu_req_cacheline_0 ", f3_req_0 ), 900 ("ifu_req_cacheline_1 ", f3_req_1 ), 901 ("ifu_req_cacheline_0_hit ", f3_hit_1 ), 902 ("ifu_req_cacheline_1_hit ", f3_hit_1 ), 903 ("only_0_hit ", f3_perf_info.only_0_hit && io.toIbuffer.fire ), 904 ("only_0_miss ", f3_perf_info.only_0_miss && io.toIbuffer.fire ), 905 ("hit_0_hit_1 ", f3_perf_info.hit_0_hit_1 && io.toIbuffer.fire ), 906 ("hit_0_miss_1 ", f3_perf_info.hit_0_miss_1 && io.toIbuffer.fire ), 907 ("miss_0_hit_1 ", f3_perf_info.miss_0_hit_1 && io.toIbuffer.fire ), 908 ("miss_0_miss_1 ", f3_perf_info.miss_0_miss_1 && io.toIbuffer.fire ), 909 ) 910 generatePerfEvent() 911 912 XSPerfAccumulate("ifu_req", io.toIbuffer.fire ) 913 XSPerfAccumulate("ifu_miss", io.toIbuffer.fire && !f3_hit ) 914 XSPerfAccumulate("ifu_req_cacheline_0", f3_req_0 ) 915 XSPerfAccumulate("ifu_req_cacheline_1", f3_req_1 ) 916 XSPerfAccumulate("ifu_req_cacheline_0_hit", f3_hit_0 ) 917 XSPerfAccumulate("ifu_req_cacheline_1_hit", f3_hit_1 ) 918 XSPerfAccumulate("frontendFlush", wb_redirect ) 919 XSPerfAccumulate("only_0_hit", f3_perf_info.only_0_hit && io.toIbuffer.fire ) 920 XSPerfAccumulate("only_0_miss", f3_perf_info.only_0_miss && io.toIbuffer.fire ) 921 XSPerfAccumulate("hit_0_hit_1", f3_perf_info.hit_0_hit_1 && io.toIbuffer.fire ) 922 XSPerfAccumulate("hit_0_miss_1", f3_perf_info.hit_0_miss_1 && io.toIbuffer.fire ) 923 XSPerfAccumulate("miss_0_hit_1", f3_perf_info.miss_0_hit_1 && io.toIbuffer.fire ) 924 XSPerfAccumulate("miss_0_miss_1", f3_perf_info.miss_0_miss_1 && io.toIbuffer.fire ) 925 XSPerfAccumulate("hit_0_except_1", f3_perf_info.hit_0_except_1 && io.toIbuffer.fire ) 926 XSPerfAccumulate("miss_0_except_1", f3_perf_info.miss_0_except_1 && io.toIbuffer.fire ) 927 XSPerfAccumulate("except_0", f3_perf_info.except_0 && io.toIbuffer.fire ) 928 XSPerfHistogram("ifu2ibuffer_validCnt", PopCount(io.toIbuffer.bits.valid & io.toIbuffer.bits.enqEnable), io.toIbuffer.fire, 0, PredictWidth + 1, 1) 929 930 val isWriteFetchToIBufferTable = WireInit(Constantin.createRecord("isWriteFetchToIBufferTable" + p(XSCoreParamsKey).HartId.toString)) 931 val isWriteIfuWbToFtqTable = WireInit(Constantin.createRecord("isWriteIfuWbToFtqTable" + p(XSCoreParamsKey).HartId.toString)) 932 val fetchToIBufferTable = ChiselDB.createTable("FetchToIBuffer" + p(XSCoreParamsKey).HartId.toString, new FetchToIBufferDB) 933 val ifuWbToFtqTable = ChiselDB.createTable("IfuWbToFtq" + p(XSCoreParamsKey).HartId.toString, new IfuWbToFtqDB) 934 935 val fetchIBufferDumpData = Wire(new FetchToIBufferDB) 936 fetchIBufferDumpData.start_addr := f3_ftq_req.startAddr 937 fetchIBufferDumpData.instr_count := PopCount(io.toIbuffer.bits.enqEnable) 938 fetchIBufferDumpData.exception := (f3_perf_info.except_0 && io.toIbuffer.fire) || (f3_perf_info.hit_0_except_1 && io.toIbuffer.fire) || (f3_perf_info.miss_0_except_1 && io.toIbuffer.fire) 939 fetchIBufferDumpData.is_cache_hit := f3_hit 940 941 val ifuWbToFtqDumpData = Wire(new IfuWbToFtqDB) 942 ifuWbToFtqDumpData.start_addr := wb_ftq_req.startAddr 943 ifuWbToFtqDumpData.is_miss_pred := checkFlushWb.bits.misOffset.valid 944 ifuWbToFtqDumpData.miss_pred_offset := checkFlushWb.bits.misOffset.bits 945 ifuWbToFtqDumpData.checkJalFault := checkJalFault 946 ifuWbToFtqDumpData.checkRetFault := checkRetFault 947 ifuWbToFtqDumpData.checkTargetFault := checkTargetFault 948 ifuWbToFtqDumpData.checkNotCFIFault := checkNotCFIFault 949 ifuWbToFtqDumpData.checkInvalidTaken := checkInvalidTaken 950 951 fetchToIBufferTable.log( 952 data = fetchIBufferDumpData, 953 en = isWriteFetchToIBufferTable.orR && io.toIbuffer.fire, 954 site = "IFU" + p(XSCoreParamsKey).HartId.toString, 955 clock = clock, 956 reset = reset 957 ) 958 ifuWbToFtqTable.log( 959 data = ifuWbToFtqDumpData, 960 en = isWriteIfuWbToFtqTable.orR && checkFlushWb.valid, 961 site = "IFU" + p(XSCoreParamsKey).HartId.toString, 962 clock = clock, 963 reset = reset 964 ) 965 966} 967