/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.frontend

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.RVCDecoder
import xiangshan._
import xiangshan.cache.mmu._
import xiangshan.frontend.icache._
import utils._
import utility._
import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle}
import utility.ChiselDB

trait HasInstrMMIOConst extends HasXSParameter with HasIFUConst{
  def mmioBusWidth  = 64
  def mmioBusBytes  = mmioBusWidth / 8
  def maxInstrLen   = 32
}

trait HasIFUConst extends HasXSParameter{
  def addrAlign(addr: UInt, bytes: Int, highest: Int): UInt =
    Cat(addr(highest-1, log2Ceil(bytes)), 0.U(log2Ceil(bytes).W))
  def fetchQueueSize = 2

  def getBasicBlockIdx(pc: UInt, start: UInt): UInt = {
    val byteOffset = pc - start
    (byteOffset - instBytes.U)(log2Ceil(PredictWidth), instOffsetBits)
  }
}
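// Worked example for getBasicBlockIdx (illustrative, assuming instBytes = 2
// and PredictWidth = 16): for a block starting at start = 0x1000, a target
// pc = 0x100a gives byteOffset = 0xa, so the function returns
// (0xa - 2) >> 1 = 4, i.e. the slot index of the instruction *before* pc.
// This is exactly the form consumed by the range masks built in stage 2
// (Fill(PredictWidth, 1.U) >> ~idx keeps slots 0..idx).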
class IfuToFtqIO(implicit p: Parameters) extends XSBundle {
  val pdWb = Valid(new PredecodeWritebackBundle)
}

class IfuToBackendIO(implicit p: Parameters) extends XSBundle {
  // write to backend gpaddr mem
  val gpaddrMem_wen   = Output(Bool())
  val gpaddrMem_waddr = Output(UInt(log2Ceil(FtqSize).W)) // Ftq Ptr
  // 2 gpaddrs, correspond to startAddr & nextLineAddr in bundle FtqICacheInfo
  // TODO: avoid cross page entry in Ftq
  val gpaddrMem_wdata = Output(UInt(GPAddrBits.W))
}

class FtqInterface(implicit p: Parameters) extends XSBundle {
  val fromFtq = Flipped(new FtqToIfuIO)
  val toFtq   = new IfuToFtqIO
}

class UncacheInterface(implicit p: Parameters) extends XSBundle {
  val fromUncache = Flipped(DecoupledIO(new InsUncacheResp))
  val toUncache   = DecoupledIO(new InsUncacheReq)
}

class NewIFUIO(implicit p: Parameters) extends XSBundle {
  val ftqInter        = new FtqInterface
  val icacheInter     = Flipped(new IFUICacheIO)
  val icacheStop      = Output(Bool())
  val icachePerfInfo  = Input(new ICachePerfInfo)
  val toIbuffer       = Decoupled(new FetchToIBuffer)
  val toBackend       = new IfuToBackendIO
  val uncacheInter    = new UncacheInterface
  val frontendTrigger = Flipped(new FrontendTdataDistributeIO)
  val rob_commits     = Flipped(Vec(CommitWidth, Valid(new RobCommitInfo)))
  val iTLBInter       = new TlbRequestIO
  val pmp             = new ICachePMPBundle
  val mmioCommitRead  = new mmioCommitRead
}

// record the situation in which fallThruAddr falls into
// the middle of an RVI inst
class LastHalfInfo(implicit p: Parameters) extends XSBundle {
  val valid    = Bool()
  val middlePC = UInt(VAddrBits.W)
  def matchThisBlock(startAddr: UInt) = valid && middlePC === startAddr
}

class IfuToPreDecode(implicit p: Parameters) extends XSBundle {
  val data            = if (HasCExtension) Vec(PredictWidth + 1, UInt(16.W)) else Vec(PredictWidth, UInt(32.W))
  val frontendTrigger = new FrontendTdataDistributeIO
  val pc              = Vec(PredictWidth, UInt(VAddrBits.W))
}

class IfuToPredChecker(implicit p: Parameters) extends XSBundle {
  val ftqOffset  = Valid(UInt(log2Ceil(PredictWidth).W))
  val jumpOffset = Vec(PredictWidth, UInt(XLEN.W))
  val target     = UInt(VAddrBits.W)
  val instrRange = Vec(PredictWidth, Bool())
  val instrValid = Vec(PredictWidth, Bool())
  val pds        = Vec(PredictWidth, new PreDecodeInfo)
  val pc         = Vec(PredictWidth, UInt(VAddrBits.W))
  val fire_in    = Bool()
}

class FetchToIBufferDB extends Bundle {
  val start_addr   = UInt(39.W)
  val instr_count  = UInt(32.W)
  val exception    = Bool()
  val is_cache_hit = Bool()
}

class IfuWbToFtqDB extends Bundle {
  val start_addr        = UInt(39.W)
  val is_miss_pred      = Bool()
  val miss_pred_offset  = UInt(32.W)
  val checkJalFault     = Bool()
  val checkRetFault     = Bool()
  val checkTargetFault  = Bool()
  val checkNotCFIFault  = Bool()
  val checkInvalidTaken = Bool()
}

class NewIFU(implicit p: Parameters) extends XSModule
  with HasICacheParameters
  with HasIFUConst
  with HasPdConst
  with HasCircularQueuePtrHelper
  with HasPerfEvents
  with HasTlbConst
{
  val io = IO(new NewIFUIO)
  val (toFtq, fromFtq)         = (io.ftqInter.toFtq, io.ftqInter.fromFtq)
  val fromICache               = io.icacheInter.resp
  val (toUncache, fromUncache) = (io.uncacheInter.toUncache, io.uncacheInter.fromUncache)

  def isCrossLineReq(start: UInt, end: UInt): Bool = start(blockOffBits) ^ end(blockOffBits)

  def numOfStage = 3
  // PcCutPoint equals the bit position of lower_result's overflow bit
  def PcCutPoint = (VAddrBits / 4) - 1
  def CatPC(low: UInt, high: UInt, high1: UInt): UInt = {
    Mux(
      low(PcCutPoint),
      Cat(high1, low(PcCutPoint - 1, 0)),
      Cat(high, low(PcCutPoint - 1, 0))
    )
  }
  def CatPC(lowVec: Vec[UInt], high: UInt, high1: UInt): Vec[UInt] = VecInit(lowVec.map(CatPC(_, high, high1)))
  require(numOfStage > 1, "BPU numOfStage must be greater than 1")
  val topdown_stages = RegInit(VecInit(Seq.fill(numOfStage)(0.U.asTypeOf(new FrontendTopDownBundle))))
  // bubble events in IFU, only happen in stage 1
  val icacheMissBubble = Wire(Bool())
  val itlbMissBubble   = Wire(Bool())

  // only driven by clock, not valid-ready
  topdown_stages(0) := fromFtq.req.bits.topdown_info
  for (i <- 1 until numOfStage) {
    topdown_stages(i) := topdown_stages(i - 1)
  }
  when (icacheMissBubble) {
    topdown_stages(1).reasons(TopDownCounters.ICacheMissBubble.id) := true.B
  }
  when (itlbMissBubble) {
    topdown_stages(1).reasons(TopDownCounters.ITLBMissBubble.id) := true.B
  }
  io.toIbuffer.bits.topdown_info := topdown_stages(numOfStage - 1)
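  // Illustrative timeline: an ITLB/ICache miss observed while a request sits
  // in stage 1 marks topdown_stages(1); the mark then shifts one stage per
  // cycle with the request and leaves through io.toIbuffer.bits.topdown_info,
  // where the backend accounts it as a frontend bubble.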
  when (fromFtq.topdown_redirect.valid) {
    // only redirect from backend, IFU redirect itself is handled elsewhere
    when (fromFtq.topdown_redirect.bits.debugIsCtrl) {
      /*
      for (i <- 0 until numOfStage) {
        topdown_stages(i).reasons(TopDownCounters.ControlRedirectBubble.id) := true.B
      }
      io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.ControlRedirectBubble.id) := true.B
      */
      when (fromFtq.topdown_redirect.bits.ControlBTBMissBubble) {
        for (i <- 0 until numOfStage) {
          topdown_stages(i).reasons(TopDownCounters.BTBMissBubble.id) := true.B
        }
        io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.BTBMissBubble.id) := true.B
      } .elsewhen (fromFtq.topdown_redirect.bits.TAGEMissBubble) {
        for (i <- 0 until numOfStage) {
          topdown_stages(i).reasons(TopDownCounters.TAGEMissBubble.id) := true.B
        }
        io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.TAGEMissBubble.id) := true.B
      } .elsewhen (fromFtq.topdown_redirect.bits.SCMissBubble) {
        for (i <- 0 until numOfStage) {
          topdown_stages(i).reasons(TopDownCounters.SCMissBubble.id) := true.B
        }
        io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.SCMissBubble.id) := true.B
      } .elsewhen (fromFtq.topdown_redirect.bits.ITTAGEMissBubble) {
        for (i <- 0 until numOfStage) {
          topdown_stages(i).reasons(TopDownCounters.ITTAGEMissBubble.id) := true.B
        }
        io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.ITTAGEMissBubble.id) := true.B
      } .elsewhen (fromFtq.topdown_redirect.bits.RASMissBubble) {
        for (i <- 0 until numOfStage) {
          topdown_stages(i).reasons(TopDownCounters.RASMissBubble.id) := true.B
        }
        io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.RASMissBubble.id) := true.B
      }
    } .elsewhen (fromFtq.topdown_redirect.bits.debugIsMemVio) {
      for (i <- 0 until numOfStage) {
        topdown_stages(i).reasons(TopDownCounters.MemVioRedirectBubble.id) := true.B
      }
      io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.MemVioRedirectBubble.id) := true.B
    } .otherwise {
      for (i <- 0 until numOfStage) {
        topdown_stages(i).reasons(TopDownCounters.OtherRedirectBubble.id) := true.B
      }
      io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.OtherRedirectBubble.id) := true.B
    }
  }

  class TlbExcept(implicit p: Parameters) extends XSBundle {
    val pageFault   = Bool()
    val accessFault = Bool()
    val mmio        = Bool()
  }

  val preDecoder = Module(new PreDecode)

  val predChecker     = Module(new PredChecker)
  val frontendTrigger = Module(new FrontendTrigger)
  val (checkerIn, checkerOutStage1, checkerOutStage2) = (predChecker.io.in, predChecker.io.out.stage1Out, predChecker.io.out.stage2Out)

  io.iTLBInter.req_kill   := false.B
  io.iTLBInter.resp.ready := true.B

  /**
    ******************************************************************************
    * IFU Stage 0
    * - send cacheline fetch request to ICacheMainPipe
    ******************************************************************************
    */

  val f0_valid      = fromFtq.req.valid
  val f0_ftq_req    = fromFtq.req.bits
  val f0_doubleLine = fromFtq.req.bits.crossCacheline
  val f0_vSetIdx    = VecInit(get_idx(f0_ftq_req.startAddr), get_idx(f0_ftq_req.nextlineStart))
  val f0_fire       = fromFtq.req.fire

  val f0_flush, f1_flush, f2_flush, f3_flush = WireInit(false.B)
  val from_bpu_f0_flush, from_bpu_f1_flush, from_bpu_f2_flush, from_bpu_f3_flush = WireInit(false.B)

  from_bpu_f0_flush := fromFtq.flushFromBpu.shouldFlushByStage2(f0_ftq_req.ftqIdx) ||
                       fromFtq.flushFromBpu.shouldFlushByStage3(f0_ftq_req.ftqIdx)

  val wb_redirect, mmio_redirect, backend_redirect = WireInit(false.B)
  val f3_wb_not_flush = WireInit(false.B)

  backend_redirect := fromFtq.redirect.valid
  f3_flush := backend_redirect || (wb_redirect && !f3_wb_not_flush)
  f2_flush := backend_redirect || mmio_redirect || wb_redirect
  f1_flush := f2_flush || from_bpu_f1_flush
  f0_flush := f1_flush || from_bpu_f0_flush

  val f1_ready, f2_ready, f3_ready = WireInit(false.B)

  fromFtq.req.ready := f1_ready && io.icacheInter.icacheReady
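  // Flush summary (derived from the assignments above): a flush source kills
  // its own stage and every younger one. A backend redirect flushes f3..f0;
  // a writeback redirect flushes f2..f0 and f3 unless f3_wb_not_flush holds;
  // an MMIO redirect flushes f2..f0; BPU overrides flush only f1/f0.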
  when (wb_redirect) {
    when (f3_wb_not_flush) {
      topdown_stages(2).reasons(TopDownCounters.BTBMissBubble.id) := true.B
    }
    for (i <- 0 until numOfStage - 1) {
      topdown_stages(i).reasons(TopDownCounters.BTBMissBubble.id) := true.B
    }
  }

  /** <PERF> f0 fetch bubble */

  XSPerfAccumulate("fetch_bubble_ftq_not_valid", !fromFtq.req.valid && fromFtq.req.ready)
  // XSPerfAccumulate("fetch_bubble_pipe_stall", f0_valid && toICache(0).ready && toICache(1).ready && !f1_ready)
  // XSPerfAccumulate("fetch_bubble_icache_0_busy", f0_valid && !toICache(0).ready)
  // XSPerfAccumulate("fetch_bubble_icache_1_busy", f0_valid && !toICache(1).ready)
  XSPerfAccumulate("fetch_flush_backend_redirect", backend_redirect)
  XSPerfAccumulate("fetch_flush_wb_redirect", wb_redirect)
  XSPerfAccumulate("fetch_flush_bpu_f1_flush", from_bpu_f1_flush)
  XSPerfAccumulate("fetch_flush_bpu_f0_flush", from_bpu_f0_flush)

  /**
    ******************************************************************************
    * IFU Stage 1
    * - calculate pc/half_pc/cut_ptr for every instruction
    ******************************************************************************
    */

  val f1_valid      = RegInit(false.B)
  val f1_ftq_req    = RegEnable(f0_ftq_req, f0_fire)
  // val f1_situation = RegEnable(f0_situation, f0_fire)
  val f1_doubleLine = RegEnable(f0_doubleLine, f0_fire)
  val f1_vSetIdx    = RegEnable(f0_vSetIdx, f0_fire)
  val f1_fire       = f1_valid && f2_ready

  f1_ready := f1_fire || !f1_valid

  from_bpu_f1_flush := fromFtq.flushFromBpu.shouldFlushByStage3(f1_ftq_req.ftqIdx) && f1_valid
  // from_bpu_f1_flush := false.B

  when(f1_flush)                  {f1_valid := false.B}
  .elsewhen(f0_fire && !f0_flush) {f1_valid := true.B}
  .elsewhen(f1_fire)              {f1_valid := false.B}

  val f1_pc_high       = f1_ftq_req.startAddr(VAddrBits-1, PcCutPoint)
  val f1_pc_high_plus1 = f1_pc_high + 1.U

  /**
    * In order to reduce power consumption, the full PC value is not calculated
    * in the first stage. Original (now deprecated) logic:
    *   val f1_pc = VecInit(f1_pc_lower_result.map{ i =>
    *     Mux(i(f1_pc_adder_cut_point), Cat(f1_pc_high_plus1, i(f1_pc_adder_cut_point-1, 0)), Cat(f1_pc_high, i(f1_pc_adder_cut_point-1, 0)))})
    */
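  // Illustrative sketch of the split-add trick: only the low PcCutPoint bits
  // pass through the per-slot adders, and a 1-bit overflow selects between the
  // precomputed high and high + 1, replacing a full-width carry chain:
  //   low   = Cat(0.U(1.W), startAddr(PcCutPoint-1, 0)) + (i * 2).U
  //   pc(i) = Mux(low(PcCutPoint), Cat(high1, low(PcCutPoint-1, 0)),
  //                                Cat(high,  low(PcCutPoint-1, 0)))
  // which is exactly what CatPC computes for f1_pc below.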
  val f1_pc_lower_result = VecInit((0 until PredictWidth).map(i => Cat(0.U(1.W), f1_ftq_req.startAddr(PcCutPoint-1, 0)) + (i * 2).U)) // cat with overflow bit

  val f1_pc = CatPC(f1_pc_lower_result, f1_pc_high, f1_pc_high_plus1)

  val f1_half_snpc_lower_result = VecInit((0 until PredictWidth).map(i => Cat(0.U(1.W), f1_ftq_req.startAddr(PcCutPoint-1, 0)) + ((i+2) * 2).U)) // cat with overflow bit
  val f1_half_snpc = CatPC(f1_half_snpc_lower_result, f1_pc_high, f1_pc_high_plus1)

  if (env.FPGAPlatform){
    val f1_pc_diff        = VecInit((0 until PredictWidth).map(i => f1_ftq_req.startAddr + (i * 2).U))
    val f1_half_snpc_diff = VecInit((0 until PredictWidth).map(i => f1_ftq_req.startAddr + ((i+2) * 2).U))

    XSError(f1_pc.zip(f1_pc_diff).map{ case (a, b) => a.asUInt =/= b.asUInt }.reduce(_||_), "f1_pc adder cut fail")
    XSError(f1_half_snpc.zip(f1_half_snpc_diff).map{ case (a, b) => a.asUInt =/= b.asUInt }.reduce(_||_), "f1_half_snpc adder cut fail")
  }

  val f1_cut_ptr = if (HasCExtension) VecInit((0 until PredictWidth + 1).map(i => Cat(0.U(2.W), f1_ftq_req.startAddr(blockOffBits-1, 1)) + i.U))
                   else               VecInit((0 until PredictWidth).map(i => Cat(0.U(2.W), f1_ftq_req.startAddr(blockOffBits-1, 2)) + i.U))

  /**
    ******************************************************************************
    * IFU Stage 2
    * - icache response data (latched for pipeline stop)
    * - generate exception bits for every instruction (page fault/access fault/mmio)
    * - generate predicted instruction range (1 means this instruction is in this fetch packet)
    * - cut data from cachelines to packet instruction code
    * - instruction predecode and RVC expand
    ******************************************************************************
    */

  val icacheRespAllValid = WireInit(false.B)

  val f2_valid      = RegInit(false.B)
  val f2_ftq_req    = RegEnable(f1_ftq_req, f1_fire)
  // val f2_situation = RegEnable(f1_situation, f1_fire)
  val f2_doubleLine = RegEnable(f1_doubleLine, f1_fire)
  val f2_vSetIdx    = RegEnable(f1_vSetIdx, f1_fire)
  val f2_fire       = f2_valid && f3_ready && icacheRespAllValid

  f2_ready := f2_fire || !f2_valid
  // TODO: addr compare may be timing critical
  val f2_icache_all_resp_wire = fromICache(0).valid && (fromICache(0).bits.vaddr === f2_ftq_req.startAddr) && ((fromICache(1).valid && (fromICache(1).bits.vaddr === f2_ftq_req.nextlineStart)) || !f2_doubleLine)
  val f2_icache_all_resp_reg  = RegInit(false.B)

  icacheRespAllValid := f2_icache_all_resp_reg || f2_icache_all_resp_wire

  icacheMissBubble := io.icacheInter.topdownIcacheMiss
  itlbMissBubble   := io.icacheInter.topdownItlbMiss

  io.icacheStop := !f3_ready

  when(f2_flush)                                              {f2_icache_all_resp_reg := false.B}
  .elsewhen(f2_valid && f2_icache_all_resp_wire && !f3_ready) {f2_icache_all_resp_reg := true.B}
  .elsewhen(f2_fire && f2_icache_all_resp_reg)                {f2_icache_all_resp_reg := false.B}

  when(f2_flush)                  {f2_valid := false.B}
  .elsewhen(f1_fire && !f1_flush) {f2_valid := true.B}
  .elsewhen(f2_fire)              {f2_valid := false.B}

  val f2_except_pf  = VecInit((0 until PortNumber).map(i => fromICache(i).bits.tlbExcp.pageFault))
  val f2_except_gpf = VecInit((0 until PortNumber).map(i => fromICache(i).bits.tlbExcp.guestPageFault))
  val f2_except_af  = VecInit((0 until PortNumber).map(i => fromICache(i).bits.tlbExcp.accessFault))
  // paddr and gpaddr of [startAddr, nextLineAddr]
  val f2_paddrs = VecInit((0 until PortNumber).map(i => fromICache(i).bits.paddr))
  val f2_gpaddr = fromICache(0).bits.gpaddr
  val f2_mmio   = fromICache(0).bits.tlbExcp.mmio &&
                  !fromICache(0).bits.tlbExcp.accessFault &&
                  !fromICache(0).bits.tlbExcp.pageFault &&
                  !fromICache(0).bits.tlbExcp.guestPageFault

  /**
    * reduce the number of registers, origin code:
    *   f2_pc = RegEnable(f1_pc, f1_fire)
    */
  val f2_pc_lower_result = RegEnable(f1_pc_lower_result, f1_fire)
  val f2_pc_high         = RegEnable(f1_pc_high, f1_fire)
  val f2_pc_high_plus1   = RegEnable(f1_pc_high_plus1, f1_fire)
  val f2_pc              = CatPC(f2_pc_lower_result, f2_pc_high, f2_pc_high_plus1)

  val f2_cut_ptr      = RegEnable(f1_cut_ptr, f1_fire)
  val f2_resend_vaddr = RegEnable(f1_ftq_req.startAddr + 2.U, f1_fire)

  def isNextLine(pc: UInt, startAddr: UInt) = {
    startAddr(blockOffBits) ^ pc(blockOffBits)
  }

  def isLastInLine(pc: UInt) = {
    pc(blockOffBits - 1, 0) === "b111110".U
  }

  val f2_foldpc = VecInit(f2_pc.map(i => XORFold(i(VAddrBits-1, 1), MemPredPCWidth)))
  val f2_jump_range = Fill(PredictWidth, !f2_ftq_req.ftqOffset.valid) | Fill(PredictWidth, 1.U(1.W)) >> ~f2_ftq_req.ftqOffset.bits
  val f2_ftr_range  = Fill(PredictWidth, f2_ftq_req.ftqOffset.valid) | Fill(PredictWidth, 1.U(1.W)) >> ~getBasicBlockIdx(f2_ftq_req.nextStartAddr, f2_ftq_req.startAddr)
  val f2_instr_range = f2_jump_range & f2_ftr_range
  val f2_pf_vec  = VecInit((0 until PredictWidth).map(i => (!isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_except_pf(0) || isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_doubleLine && f2_except_pf(1))))
  val f2_af_vec  = VecInit((0 until PredictWidth).map(i => (!isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_except_af(0) || isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_doubleLine && f2_except_af(1))))
  val f2_gpf_vec = VecInit((0 until PredictWidth).map(i => (!isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_except_gpf(0) || isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_doubleLine && f2_except_gpf(1))))
  val f2_perf_info = io.icachePerfInfo

  def cut(cacheline: UInt, cutPtr: Vec[UInt]): Vec[UInt] = {
    require(HasCExtension)
    // if (HasCExtension) {
    val result  = Wire(Vec(PredictWidth + 1, UInt(16.W)))
    val dataVec = cacheline.asTypeOf(Vec(blockBytes, UInt(16.W))) // the doubled cacheline viewed as 16-bit chunks
    (0 until PredictWidth + 1).foreach(i =>
      result(i) := dataVec(cutPtr(i)) // the max ptr is 3*blockBytes/4-1
    )
    result
    // } else {
    //   val result  = Wire(Vec(PredictWidth, UInt(32.W)))
    //   val dataVec = cacheline.asTypeOf(Vec(blockBytes * 2 / 4, UInt(32.W)))
    //   (0 until PredictWidth).foreach(i =>
    //     result(i) := dataVec(cutPtr(i))
    //   )
    //   result
    // }
  }

  val f2_cache_response_data = fromICache.map(_.bits.data)
  val f2_data_2_cacheline    = Cat(f2_cache_response_data(0), f2_cache_response_data(0))

  val f2_cut_data = cut(f2_data_2_cacheline, f2_cut_ptr)
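  // Worked example for cut() (illustrative, assuming blockBytes = 64, i.e.
  // blockOffBits = 6): a fetch starting at byte offset 0x3a of the line gives
  // f2_cut_ptr(i) = (0x3a >> 1) + i = 29 + i, so cut() returns the
  // PredictWidth + 1 consecutive 16-bit halfwords 29..29+PredictWidth of
  // f2_data_2_cacheline -- hence the "max ptr is 3*blockBytes/4-1" note above.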
  /** predecode (include RVC expander) */
  // preDecoderRegIn.data := f2_reg_cut_data
  // preDecoderRegInIn.frontendTrigger := io.frontendTrigger
  // preDecoderRegInIn.csrTriggerEnable := io.csrTriggerEnable
  // preDecoderRegIn.pc := f2_pc

  val preDecoderIn = preDecoder.io.in
  preDecoderIn.valid := f2_valid
  preDecoderIn.bits.data := f2_cut_data
  preDecoderIn.bits.frontendTrigger := io.frontendTrigger
  preDecoderIn.bits.pc := f2_pc
  val preDecoderOut = preDecoder.io.out

  // val f2_expd_instr = preDecoderOut.expInstr
  val f2_instr        = preDecoderOut.instr
  val f2_pd           = preDecoderOut.pd
  val f2_jump_offset  = preDecoderOut.jumpOffset
  val f2_hasHalfValid = preDecoderOut.hasHalfValid
  val f2_crossPageFault      = VecInit((0 until PredictWidth).map(i => isLastInLine(f2_pc(i)) && !f2_except_pf(0) && f2_doubleLine && f2_except_pf(1) && !f2_pd(i).isRVC))
  val f2_crossGuestPageFault = VecInit((0 until PredictWidth).map(i => isLastInLine(f2_pc(i)) && !f2_except_gpf(0) && f2_doubleLine && f2_except_gpf(1) && !f2_pd(i).isRVC))
  XSPerfAccumulate("fetch_bubble_icache_not_resp", f2_valid && !icacheRespAllValid)

  /**
    ******************************************************************************
    * IFU Stage 3
    * - handle MMIO instruction
    *   - send request to Uncache fetch Unit
    *   - every packet includes 1 MMIO instruction
    *   - MMIO instructions will stop fetch pipeline until committing from RoB
    *   - flush to snpc (send ifu_redirect to Ftq)
    * - Ibuffer enqueue
    * - check predict result in Frontend (jalFault/retFault/notCFIFault/invalidTakenFault/targetFault)
    * - handle last half RVI instruction
    ******************************************************************************
    */

  val f3_valid      = RegInit(false.B)
  val f3_ftq_req    = RegEnable(f2_ftq_req, f2_fire)
  // val f3_situation = RegEnable(f2_situation, f2_fire)
  val f3_doubleLine = RegEnable(f2_doubleLine, f2_fire)
  val f3_fire       = io.toIbuffer.fire

  val f3_cut_data = RegEnable(f2_cut_data, f2_fire)

  val f3_except_pf  = RegEnable(f2_except_pf, f2_fire)
  val f3_except_af  = RegEnable(f2_except_af, f2_fire)
  val f3_except_gpf = RegEnable(f2_except_gpf, f2_fire)
  val f3_mmio       = RegEnable(f2_mmio, f2_fire)

  // val f3_expd_instr = RegEnable(f2_expd_instr, f2_fire)
  val f3_instr = RegEnable(f2_instr, f2_fire)
  val f3_expd_instr = VecInit((0 until PredictWidth).map{ i =>
    val expander = Module(new RVCExpander)
    expander.io.in := f3_instr(i)
    expander.io.out.bits
  })

  val f3_pd_wire     = RegEnable(f2_pd, f2_fire)
  val f3_pd          = WireInit(f3_pd_wire)
  val f3_jump_offset = RegEnable(f2_jump_offset, f2_fire)
  val f3_af_vec      = RegEnable(f2_af_vec, f2_fire)
  val f3_pf_vec      = RegEnable(f2_pf_vec, f2_fire)
  val f3_gpf_vec     = RegEnable(f2_gpf_vec, f2_fire)

  val f3_pc_lower_result = RegEnable(f2_pc_lower_result, f2_fire)
  val f3_pc_high         = RegEnable(f2_pc_high, f2_fire)
  val f3_pc_high_plus1   = RegEnable(f2_pc_high_plus1, f2_fire)
  val f3_pc              = CatPC(f3_pc_lower_result, f3_pc_high, f3_pc_high_plus1)

  val f3_pc_last_lower_result_plus2 = RegEnable(f2_pc_lower_result(PredictWidth - 1) + 2.U, f2_fire)
  val f3_pc_last_lower_result_plus4 = RegEnable(f2_pc_lower_result(PredictWidth - 1) + 4.U, f2_fire)
  // val f3_half_snpc = RegEnable(f2_half_snpc, f2_fire)

  /**
    ***********************************************************************
    * Half snpc(i) is larger than pc(i) by 4 bytes, so pc can be reused to
    * calculate half snpc instead of keeping a second register vector.
    ***********************************************************************
    */
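  // Illustrative consequence: f3_half_snpc(i) = f3_pc(i + 2) for all but the
  // last two slots, so only those two need dedicated low-part adders
  // (f3_pc_last_lower_result_plus2/plus4, registered in f2 above).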
  val f3_half_snpc = Wire(Vec(PredictWidth, UInt(VAddrBits.W)))
  for (i <- 0 until PredictWidth) {
    if (i == (PredictWidth - 2)) {
      f3_half_snpc(i) := CatPC(f3_pc_last_lower_result_plus2, f3_pc_high, f3_pc_high_plus1)
    } else if (i == (PredictWidth - 1)) {
      f3_half_snpc(i) := CatPC(f3_pc_last_lower_result_plus4, f3_pc_high, f3_pc_high_plus1)
    } else {
      f3_half_snpc(i) := f3_pc(i + 2)
    }
  }

  val f3_instr_range         = RegEnable(f2_instr_range, f2_fire)
  val f3_foldpc              = RegEnable(f2_foldpc, f2_fire)
  val f3_crossPageFault      = RegEnable(f2_crossPageFault, f2_fire)
  val f3_crossGuestPageFault = RegEnable(f2_crossGuestPageFault, f2_fire)
  val f3_hasHalfValid        = RegEnable(f2_hasHalfValid, f2_fire)
  val f3_except       = VecInit((0 until 2).map{ i => f3_except_pf(i) || f3_except_af(i) || f3_except_gpf(i) })
  val f3_has_except   = f3_valid && (f3_except_af.reduce(_||_) || f3_except_pf.reduce(_||_) || f3_except_gpf.reduce(_||_))
  val f3_paddrs       = RegEnable(f2_paddrs, f2_fire)
  val f3_gpaddr       = RegEnable(f2_gpaddr, f2_fire)
  val f3_resend_vaddr = RegEnable(f2_resend_vaddr, f2_fire)

  // Expand 1 bit to prevent overflow when asserting
  val f3_ftq_req_startAddr     = Cat(0.U(1.W), f3_ftq_req.startAddr)
  val f3_ftq_req_nextStartAddr = Cat(0.U(1.W), f3_ftq_req.nextStartAddr)
  // brType, isCall and isRet generation is delayed to f3 stage
  val f3Predecoder = Module(new F3Predecoder)

  f3Predecoder.io.in.instr := f3_instr

  f3_pd.zipWithIndex.map{ case (pd, i) =>
    pd.brType := f3Predecoder.io.out.pd(i).brType
    pd.isCall := f3Predecoder.io.out.pd(i).isCall
    pd.isRet  := f3Predecoder.io.out.pd(i).isRet
  }

  val f3PdDiff = f3_pd_wire.zip(f3_pd).map{ case (a, b) => a.asUInt =/= b.asUInt }.reduce(_||_)
  XSError(f3_valid && f3PdDiff, "f3 pd diff")

  when(f3_valid && !f3_ftq_req.ftqOffset.valid){
    assert(f3_ftq_req_startAddr + (2*PredictWidth).U >= f3_ftq_req_nextStartAddr, s"More than ${2*PredictWidth} Bytes fetch is not allowed!")
  }

  /*** MMIO state machine ***/
  val f3_mmio_data       = Reg(Vec(2, UInt(16.W)))
  val mmio_is_RVC        = RegInit(false.B)
  val mmio_resend_addr   = RegInit(0.U(PAddrBits.W))
  val mmio_resend_af     = RegInit(false.B)
  val mmio_resend_pf     = RegInit(false.B)
  val mmio_resend_gpf    = RegInit(false.B)
  val mmio_resend_gpaddr = RegInit(0.U(GPAddrBits.W))

  // the last instruction has finished
  val is_first_instr = RegInit(true.B)
  /*** Determine whether the MMIO instruction is executable based on the previous prediction block ***/
  io.mmioCommitRead.mmioFtqPtr := RegNext(f3_ftq_req.ftqIdx - 1.U)

  val m_idle :: m_waitLastCmt :: m_sendReq :: m_waitResp :: m_sendTLB :: m_tlbResp :: m_sendPMP :: m_resendReq :: m_waitResendResp :: m_waitCommit :: m_commited :: Nil = Enum(11)
  val mmio_state = RegInit(m_idle)

  val f3_req_is_mmio     = f3_mmio && f3_valid
  val mmio_commit        = VecInit(io.rob_commits.map{ commit => commit.valid && commit.bits.ftqIdx === f3_ftq_req.ftqIdx && commit.bits.ftqOffset === 0.U }).asUInt.orR
  val f3_mmio_req_commit = f3_req_is_mmio && mmio_state === m_commited

  val f3_mmio_to_commit      = f3_req_is_mmio && mmio_state === m_waitCommit
  val f3_mmio_to_commit_next = RegNext(f3_mmio_to_commit)
  val f3_mmio_can_go         = f3_mmio_to_commit && !f3_mmio_to_commit_next

  val fromFtqRedirectReg = Wire(fromFtq.redirect.cloneType)
  fromFtqRedirectReg.bits := RegEnable(fromFtq.redirect.bits, 0.U.asTypeOf(fromFtq.redirect.bits), fromFtq.redirect.valid)
  fromFtqRedirectReg.valid := RegNext(fromFtq.redirect.valid, init = false.B)
  val mmioF3Flush           = RegNext(f3_flush, init = false.B)
  val f3_ftq_flush_self     = fromFtqRedirectReg.valid && RedirectLevel.flushItself(fromFtqRedirectReg.bits.level)
  val f3_ftq_flush_by_older = fromFtqRedirectReg.valid && isBefore(fromFtqRedirectReg.bits.ftqIdx, f3_ftq_req.ftqIdx)

  val f3_need_not_flush = f3_req_is_mmio && fromFtqRedirectReg.valid && !f3_ftq_flush_self && !f3_ftq_flush_by_older

  /**
    **********************************************************************************
    * We want to defer instruction fetching when encountering MMIO instructions
    * to ensure that the MMIO region is not negatively impacted. The exception
    * is when the MMIO instruction is the first instruction to be fetched.
    **********************************************************************************
    */
  when(is_first_instr && f3_fire){
    is_first_instr := false.B
  }

  when(f3_flush && !f3_req_is_mmio)                              {f3_valid := false.B}
  .elsewhen(mmioF3Flush && f3_req_is_mmio && !f3_need_not_flush) {f3_valid := false.B}
  .elsewhen(f2_fire && !f2_flush)                                {f3_valid := true.B}
  .elsewhen(io.toIbuffer.fire && !f3_req_is_mmio)                {f3_valid := false.B}
  .elsewhen(f3_req_is_mmio && f3_mmio_req_commit)                {f3_valid := false.B}

  val f3_mmio_use_seq_pc = RegInit(false.B)

  val (redirect_ftqIdx, redirect_ftqOffset) = (fromFtqRedirectReg.bits.ftqIdx, fromFtqRedirectReg.bits.ftqOffset)
  val redirect_mmio_req = fromFtqRedirectReg.valid && redirect_ftqIdx === f3_ftq_req.ftqIdx && redirect_ftqOffset === 0.U

  when(RegNext(f2_fire && !f2_flush) && f3_req_is_mmio) { f3_mmio_use_seq_pc := true.B }
  .elsewhen(redirect_mmio_req)                          { f3_mmio_use_seq_pc := false.B }

  f3_ready := (io.toIbuffer.ready && (f3_mmio_req_commit || !f3_req_is_mmio)) || !f3_valid
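  // MMIO FSM overview (happy path, derived from the switch below):
  //   m_idle -> m_waitLastCmt -> m_sendReq -> m_waitResp -> m_waitCommit -> m_commited
  // An RVI instruction that starts in the last 2 bytes of an 8-byte bus word
  // (needResend) takes the resend detour instead:
  //   m_waitResp -> m_sendTLB -> m_tlbResp -> m_sendPMP -> m_resendReq
  //              -> m_waitResendResp -> m_waitCommit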
  // mmio state machine
  switch(mmio_state){
    is(m_idle){
      when(f3_req_is_mmio){
        mmio_state := m_waitLastCmt
      }
    }

    is(m_waitLastCmt){
      when(is_first_instr){
        mmio_state := m_sendReq
      }.otherwise{
        mmio_state := Mux(io.mmioCommitRead.mmioLastCommit, m_sendReq, m_waitLastCmt)
      }
    }

    is(m_sendReq){
      mmio_state := Mux(toUncache.fire, m_waitResp, m_sendReq)
    }

    is(m_waitResp){
      when(fromUncache.fire){
        val isRVC      = fromUncache.bits.data(1,0) =/= 3.U
        val needResend = !isRVC && f3_paddrs(0)(2,1) === 3.U
        mmio_state := Mux(needResend, m_sendTLB, m_waitCommit)
        mmio_is_RVC := isRVC
        f3_mmio_data(0) := fromUncache.bits.data(15,0)
        f3_mmio_data(1) := fromUncache.bits.data(31,16)
      }
    }

    is(m_sendTLB){
      when(io.iTLBInter.req.valid && !io.iTLBInter.resp.bits.miss){
        mmio_state := m_tlbResp
      }
    }

    is(m_tlbResp){
      val tlbExcept = io.iTLBInter.resp.bits.excp(0).pf.instr ||
                      io.iTLBInter.resp.bits.excp(0).af.instr ||
                      io.iTLBInter.resp.bits.excp(0).gpf.instr
      mmio_state := Mux(tlbExcept, m_waitCommit, m_sendPMP)
      mmio_resend_addr   := io.iTLBInter.resp.bits.paddr(0)
      mmio_resend_af     := mmio_resend_af || io.iTLBInter.resp.bits.excp(0).af.instr
      mmio_resend_pf     := mmio_resend_pf || io.iTLBInter.resp.bits.excp(0).pf.instr
      mmio_resend_gpf    := mmio_resend_gpf || io.iTLBInter.resp.bits.excp(0).gpf.instr
      mmio_resend_gpaddr := io.iTLBInter.resp.bits.gpaddr(0)
    }

    is(m_sendPMP){
      val pmpExcpAF = io.pmp.resp.instr || !io.pmp.resp.mmio
      mmio_state := Mux(pmpExcpAF, m_waitCommit, m_resendReq)
      mmio_resend_af := pmpExcpAF
    }

    is(m_resendReq){
      mmio_state := Mux(toUncache.fire, m_waitResendResp, m_resendReq)
    }

    is(m_waitResendResp){
      when(fromUncache.fire){
        mmio_state := m_waitCommit
        f3_mmio_data(1) := fromUncache.bits.data(15,0)
      }
    }

    is(m_waitCommit){
      when(mmio_commit){
        mmio_state := m_commited
      }
    }

    // normal mmio instruction
    is(m_commited){
      mmio_state := m_idle
      mmio_is_RVC := false.B
      mmio_resend_addr := 0.U
      mmio_resend_af := false.B
      mmio_resend_pf := false.B
      mmio_resend_gpf := false.B
      mmio_resend_gpaddr := 0.U
    }
  }

  // Exception or flush by older branch prediction
  // Condition is from RegNext(fromFtq.redirect), 1 cycle after backend redirect
  when(f3_ftq_flush_self || f3_ftq_flush_by_older) {
    mmio_state := m_idle
    mmio_is_RVC := false.B
    mmio_resend_addr := 0.U
    mmio_resend_af := false.B
    mmio_resend_pf := false.B
    mmio_resend_gpf := false.B
    mmio_resend_gpaddr := 0.U
    f3_mmio_data.map(_ := 0.U)
  }

  toUncache.valid     := ((mmio_state === m_sendReq) || (mmio_state === m_resendReq)) && f3_req_is_mmio
  toUncache.bits.addr := Mux((mmio_state === m_resendReq), mmio_resend_addr, f3_paddrs(0))
  fromUncache.ready   := true.B

  io.iTLBInter.req.valid          := (mmio_state === m_sendTLB) && f3_req_is_mmio
  io.iTLBInter.req.bits.size      := 3.U
  io.iTLBInter.req.bits.vaddr     := f3_resend_vaddr
  io.iTLBInter.req.bits.debug.pc  := f3_resend_vaddr
  io.iTLBInter.req.bits.hyperinst := DontCare
  io.iTLBInter.req.bits.hlvx      := DontCare

  io.iTLBInter.req.bits.kill               := false.B // IFU use itlb for mmio, doesn't need sync, set it to false
  io.iTLBInter.req.bits.cmd                := TlbCmd.exec
  io.iTLBInter.req.bits.memidx             := DontCare
  io.iTLBInter.req.bits.debug.robIdx       := DontCare
  io.iTLBInter.req.bits.no_translate       := false.B
  io.iTLBInter.req.bits.debug.isFirstIssue := DontCare

  io.pmp.req.valid     := (mmio_state === m_sendPMP) && f3_req_is_mmio
  io.pmp.req.bits.addr := mmio_resend_addr
  io.pmp.req.bits.size := 3.U
  io.pmp.req.bits.cmd  := TlbCmd.exec

  val f3_lastHalf = RegInit(0.U.asTypeOf(new LastHalfInfo))

  val f3_predecode_range = VecInit(preDecoderOut.pd.map(inst => inst.valid)).asUInt
  val f3_mmio_range      = VecInit((0 until PredictWidth).map(i => if (i == 0) true.B else false.B))
  val f3_instr_valid     = Wire(Vec(PredictWidth, Bool()))

  /*** prediction result check ***/
  checkerIn.ftqOffset  := f3_ftq_req.ftqOffset
  checkerIn.jumpOffset := f3_jump_offset
  checkerIn.target     := f3_ftq_req.nextStartAddr
  checkerIn.instrRange := f3_instr_range.asTypeOf(Vec(PredictWidth, Bool()))
  checkerIn.instrValid := f3_instr_valid.asTypeOf(Vec(PredictWidth, Bool()))
  checkerIn.pds        := f3_pd
  checkerIn.pc         := f3_pc
  checkerIn.fire_in    := RegNext(f2_fire, init = false.B)

  /*** handle half RVI in the last 2 Bytes ***/
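  // Illustrative scenario (assuming PredictWidth = 16, i.e. a 32-byte fetch
  // block [0x1000, 0x1020)): an RVI instruction at 0x101e has only its low
  // half inside the block, so f3_lastHalf records middlePC = 0x1020, and the
  // next packet, which must start exactly there, uses f3_hasHalfValid to
  // re-mark instruction validity.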
  def hasLastHalf(idx: UInt) = {
    // !f3_pd(idx).isRVC && checkerOutStage1.fixedRange(idx) && f3_instr_valid(idx) && !checkerOutStage1.fixedTaken(idx) && !checkerOutStage2.fixedMissPred(idx) && !f3_req_is_mmio
    !f3_pd(idx).isRVC && checkerOutStage1.fixedRange(idx) && f3_instr_valid(idx) && !checkerOutStage1.fixedTaken(idx) && !f3_req_is_mmio
  }

  val f3_last_validIdx = ParallelPosteriorityEncoder(checkerOutStage1.fixedRange)

  val f3_hasLastHalf    = hasLastHalf((PredictWidth - 1).U)
  val f3_false_lastHalf = hasLastHalf(f3_last_validIdx)
  val f3_false_snpc     = f3_half_snpc(f3_last_validIdx)

  val f3_lastHalf_mask    = VecInit((0 until PredictWidth).map(i => if (i == 0) false.B else true.B)).asUInt
  val f3_lastHalf_disable = RegInit(false.B)

  when(f3_flush || (f3_fire && f3_lastHalf_disable)){
    f3_lastHalf_disable := false.B
  }

  when (f3_flush) {
    f3_lastHalf.valid := false.B
  }.elsewhen (f3_fire) {
    f3_lastHalf.valid    := f3_hasLastHalf && !f3_lastHalf_disable
    f3_lastHalf.middlePC := f3_ftq_req.nextStartAddr
  }

  f3_instr_valid := Mux(f3_lastHalf.valid, f3_hasHalfValid, VecInit(f3_pd.map(inst => inst.valid)))

  /*** frontend Trigger ***/
  frontendTrigger.io.pds  := f3_pd
  frontendTrigger.io.pc   := f3_pc
  frontendTrigger.io.data := f3_cut_data

  frontendTrigger.io.frontendTrigger := io.frontendTrigger

  val f3_triggered       = frontendTrigger.io.triggered
  val f3_toIbuffer_valid = f3_valid && (!f3_req_is_mmio || f3_mmio_can_go) && !f3_flush

  /*** send to Ibuffer ***/
  io.toIbuffer.valid          := f3_toIbuffer_valid
  io.toIbuffer.bits.instrs    := f3_expd_instr
  io.toIbuffer.bits.valid     := f3_instr_valid.asUInt
  io.toIbuffer.bits.enqEnable := checkerOutStage1.fixedRange.asUInt & f3_instr_valid.asUInt
  io.toIbuffer.bits.pd        := f3_pd
  io.toIbuffer.bits.ftqPtr    := f3_ftq_req.ftqIdx
  io.toIbuffer.bits.pc        := f3_pc
  io.toIbuffer.bits.ftqOffset.zipWithIndex.map{ case (a, i) => a.bits := i.U; a.valid := checkerOutStage1.fixedTaken(i) && !f3_req_is_mmio }
  io.toIbuffer.bits.foldpc    := f3_foldpc
  io.toIbuffer.bits.exceptionType := (0 until PredictWidth).map(i => MuxCase(ExceptionType.none, Array(
    (f3_pf_vec(i) || f3_crossPageFault(i))       -> ExceptionType.ipf,
    (f3_gpf_vec(i) || f3_crossGuestPageFault(i)) -> ExceptionType.igpf,
    f3_af_vec(i)                                 -> ExceptionType.acf
  )))
  io.toIbuffer.bits.crossPageIPFFix := (0 until PredictWidth).map(i => f3_crossPageFault(i) || f3_crossGuestPageFault(i))
  io.toIbuffer.bits.triggered := f3_triggered

  when(f3_lastHalf.valid){
    io.toIbuffer.bits.enqEnable := checkerOutStage1.fixedRange.asUInt & f3_instr_valid.asUInt & f3_lastHalf_mask
    io.toIbuffer.bits.valid     := f3_lastHalf_mask & f3_instr_valid.asUInt
  }

  /** to backend */
  // f3_gpaddr is valid iff gpf is detected
  io.toBackend.gpaddrMem_wen := f3_toIbuffer_valid && Mux(
    f3_req_is_mmio,
    mmio_resend_gpf,
    f3_gpf_vec.asUInt.orR || f3_crossGuestPageFault.asUInt.orR
  )
  io.toBackend.gpaddrMem_waddr := f3_ftq_req.ftqIdx.value
  io.toBackend.gpaddrMem_wdata := Mux(f3_req_is_mmio, mmio_resend_gpaddr, f3_gpaddr)

  // Write back to Ftq
  val f3_cache_fetch     = f3_valid && !(f2_fire && !f2_flush)
  val finishFetchMaskReg = RegNext(f3_cache_fetch)

  val mmioFlushWb        = Wire(Valid(new PredecodeWritebackBundle))
  val f3_mmio_missOffset = Wire(ValidUndirectioned(UInt(log2Ceil(PredictWidth).W)))
  f3_mmio_missOffset.valid := f3_req_is_mmio
  f3_mmio_missOffset.bits  := 0.U

  // Send mmioFlushWb back to FTQ 1 cycle after uncache fetch return
  // When the backend redirects, mmio_state resets after 1 cycle.
  // In this case, mask .valid to avoid overriding the backend redirect
  mmioFlushWb.valid := (f3_req_is_mmio && mmio_state === m_waitCommit && RegNext(fromUncache.fire) &&
                        f3_mmio_use_seq_pc && !f3_ftq_flush_self && !f3_ftq_flush_by_older)
  mmioFlushWb.bits.pc := f3_pc
  mmioFlushWb.bits.pd := f3_pd
  mmioFlushWb.bits.pd.zipWithIndex.map{ case (instr, i) => instr.valid := f3_mmio_range(i) }
  mmioFlushWb.bits.ftqIdx     := f3_ftq_req.ftqIdx
  mmioFlushWb.bits.ftqOffset  := f3_ftq_req.ftqOffset.bits
  mmioFlushWb.bits.misOffset  := f3_mmio_missOffset
  mmioFlushWb.bits.cfiOffset  := DontCare
  mmioFlushWb.bits.target     := Mux(mmio_is_RVC, f3_ftq_req.startAddr + 2.U, f3_ftq_req.startAddr + 4.U)
  mmioFlushWb.bits.jalTarget  := DontCare
  mmioFlushWb.bits.instrRange := f3_mmio_range

  /** external predecode for MMIO instruction */
  when(f3_req_is_mmio){
    val inst         = Cat(f3_mmio_data(1), f3_mmio_data(0))
    val currentIsRVC = isRVC(inst)

    val brType::isCall::isRet::Nil = brInfo(inst)
    val jalOffset = jal_offset(inst, currentIsRVC)
    val brOffset  = br_offset(inst, currentIsRVC)

    io.toIbuffer.bits.instrs(0) := new RVCDecoder(inst, XLEN, fLen, useAddiForMv = true).decode.bits

    io.toIbuffer.bits.pd(0).valid  := true.B
    io.toIbuffer.bits.pd(0).isRVC  := currentIsRVC
    io.toIbuffer.bits.pd(0).brType := brType
    io.toIbuffer.bits.pd(0).isCall := isCall
    io.toIbuffer.bits.pd(0).isRet  := isRet

    when (mmio_resend_af) {
      io.toIbuffer.bits.exceptionType(0) := ExceptionType.acf
    } .elsewhen (mmio_resend_pf) {
      io.toIbuffer.bits.exceptionType(0) := ExceptionType.ipf
    } .elsewhen (mmio_resend_gpf) {
      io.toIbuffer.bits.exceptionType(0) := ExceptionType.igpf
    }
    io.toIbuffer.bits.crossPageIPFFix(0) := mmio_resend_pf

    io.toIbuffer.bits.enqEnable := f3_mmio_range.asUInt

    mmioFlushWb.bits.pd(0).valid  := true.B
    mmioFlushWb.bits.pd(0).isRVC  := currentIsRVC
    mmioFlushWb.bits.pd(0).brType := brType
    mmioFlushWb.bits.pd(0).isCall := isCall
    mmioFlushWb.bits.pd(0).isRet  := isRet
  }
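  // Standalone sketch of the two expansion paths used in this file
  // (illustrative only): normal fetch expands each 16-bit slot through an
  // RVCExpander instance,
  //   val expander = Module(new RVCExpander)
  //   expander.io.in := f3_instr(i)          // raw, possibly compressed code
  //   val full = expander.io.out.bits        // 32-bit expanded instruction
  // while the single MMIO slot above calls rocket-chip's RVCDecoder directly:
  //   new RVCDecoder(inst, XLEN, fLen, useAddiForMv = true).decode.bits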
  mmio_redirect := (f3_req_is_mmio && mmio_state === m_waitCommit && RegNext(fromUncache.fire) && f3_mmio_use_seq_pc)

  XSPerfAccumulate("fetch_bubble_ibuffer_not_ready", io.toIbuffer.valid && !io.toIbuffer.ready)

  /**
    ******************************************************************************
    * IFU Write Back Stage
    * - write back predecode information to Ftq to update
    * - redirect if found fault prediction
    * - redirect if has false hit last half (last PC is not start + 32 Bytes, but in the middle of a notCFI RVI instruction)
    ******************************************************************************
    */
  val wb_enable  = RegNext(f2_fire && !f2_flush) && !f3_req_is_mmio && !f3_flush
  val wb_valid   = RegNext(wb_enable, init = false.B)
  val wb_ftq_req = RegEnable(f3_ftq_req, wb_enable)

  val wb_check_result_stage1 = RegEnable(checkerOutStage1, wb_enable)
  val wb_check_result_stage2 = checkerOutStage2
  val wb_instr_range         = RegEnable(io.toIbuffer.bits.enqEnable, wb_enable)

  val wb_pc_lower_result = RegEnable(f3_pc_lower_result, wb_enable)
  val wb_pc_high         = RegEnable(f3_pc_high, wb_enable)
  val wb_pc_high_plus1   = RegEnable(f3_pc_high_plus1, wb_enable)
  val wb_pc              = CatPC(wb_pc_lower_result, wb_pc_high, wb_pc_high_plus1)

  // val wb_pc = RegEnable(f3_pc, wb_enable)
  val wb_pd          = RegEnable(f3_pd, wb_enable)
  val wb_instr_valid = RegEnable(f3_instr_valid, wb_enable)

  /* false hit lastHalf */
  val wb_lastIdx        = RegEnable(f3_last_validIdx, wb_enable)
  val wb_false_lastHalf = RegEnable(f3_false_lastHalf, wb_enable) && wb_lastIdx =/= (PredictWidth - 1).U
  val wb_false_target   = RegEnable(f3_false_snpc, wb_enable)

  val wb_half_flush  = wb_false_lastHalf
  val wb_half_target = wb_false_target

  /* false oversize */
  val lastIsRVC = wb_instr_range.asTypeOf(Vec(PredictWidth, Bool())).last && wb_pd.last.isRVC
  val lastIsRVI = wb_instr_range.asTypeOf(Vec(PredictWidth, Bool()))(PredictWidth - 2) && !wb_pd(PredictWidth - 2).isRVC
  val lastTaken = wb_check_result_stage1.fixedTaken.last

  f3_wb_not_flush := wb_ftq_req.ftqIdx === f3_ftq_req.ftqIdx && f3_valid && wb_valid

  /** If a request that carries a last half but was mispredicted enters the wb
    * stage while f3 stalls this cycle, set a flag to notify f3 that its
    * lastHalf flag need not be set.
    */
  // f3_fire is after wb_valid
  when(wb_valid && RegNext(f3_hasLastHalf, init = false.B)
        && wb_check_result_stage2.fixedMissPred(PredictWidth - 1) && !f3_fire && !RegNext(f3_fire, init = false.B) && !f3_flush
  ){
    f3_lastHalf_disable := true.B
  }

  // wb_valid and f3_fire are in the same cycle
  when(wb_valid && RegNext(f3_hasLastHalf, init = false.B)
        && wb_check_result_stage2.fixedMissPred(PredictWidth - 1) && f3_fire
  ){
    f3_lastHalf.valid := false.B
  }

  val checkFlushWb = Wire(Valid(new PredecodeWritebackBundle))
  val checkFlushWbjalTargetIdx = ParallelPriorityEncoder(VecInit(wb_pd.zip(wb_instr_valid).map{ case (pd, v) => v && pd.isJal }))
  val checkFlushWbTargetIdx    = ParallelPriorityEncoder(wb_check_result_stage2.fixedMissPred)
  checkFlushWb.valid   := wb_valid
  checkFlushWb.bits.pc := wb_pc
  checkFlushWb.bits.pd := wb_pd
  checkFlushWb.bits.pd.zipWithIndex.map{ case (instr, i) => instr.valid := wb_instr_valid(i) }
  checkFlushWb.bits.ftqIdx          := wb_ftq_req.ftqIdx
  checkFlushWb.bits.ftqOffset       := wb_ftq_req.ftqOffset.bits
  checkFlushWb.bits.misOffset.valid := ParallelOR(wb_check_result_stage2.fixedMissPred) || wb_half_flush
  checkFlushWb.bits.misOffset.bits  := Mux(wb_half_flush, wb_lastIdx, ParallelPriorityEncoder(wb_check_result_stage2.fixedMissPred))
  checkFlushWb.bits.cfiOffset.valid := ParallelOR(wb_check_result_stage1.fixedTaken)
  checkFlushWb.bits.cfiOffset.bits  := ParallelPriorityEncoder(wb_check_result_stage1.fixedTaken)
  checkFlushWb.bits.target          := Mux(wb_half_flush, wb_half_target, wb_check_result_stage2.fixedTarget(checkFlushWbTargetIdx))
  checkFlushWb.bits.jalTarget       := wb_check_result_stage2.jalTarget(checkFlushWbjalTargetIdx)
  checkFlushWb.bits.instrRange      := wb_instr_range.asTypeOf(Vec(PredictWidth, Bool()))

  toFtq.pdWb := Mux(wb_valid, checkFlushWb, mmioFlushWb)

  wb_redirect := checkFlushWb.bits.misOffset.valid && wb_valid

  /* write back flush type */
  val checkFaultType    = wb_check_result_stage2.faultType
  val checkJalFault     = wb_valid && checkFaultType.map(_.isjalFault).reduce(_||_)
  val checkRetFault     = wb_valid && checkFaultType.map(_.isRetFault).reduce(_||_)
  val checkTargetFault  = wb_valid && checkFaultType.map(_.istargetFault).reduce(_||_)
  val checkNotCFIFault  = wb_valid && checkFaultType.map(_.notCFIFault).reduce(_||_)
  val checkInvalidTaken = wb_valid && checkFaultType.map(_.invalidTakenFault).reduce(_||_)

  XSPerfAccumulate("predecode_flush_jalFault", checkJalFault)
XSPerfAccumulate("predecode_flush_retFault", checkRetFault ) 990 XSPerfAccumulate("predecode_flush_targetFault", checkTargetFault ) 991 XSPerfAccumulate("predecode_flush_notCFIFault", checkNotCFIFault ) 992 XSPerfAccumulate("predecode_flush_incalidTakenFault", checkInvalidTaken ) 993 994 when(checkRetFault){ 995 XSDebug("startAddr:%x nextstartAddr:%x taken:%d takenIdx:%d\n", 996 wb_ftq_req.startAddr, wb_ftq_req.nextStartAddr, wb_ftq_req.ftqOffset.valid, wb_ftq_req.ftqOffset.bits) 997 } 998 999 1000 /** performance counter */ 1001 val f3_perf_info = RegEnable(f2_perf_info, f2_fire) 1002 val f3_req_0 = io.toIbuffer.fire 1003 val f3_req_1 = io.toIbuffer.fire && f3_doubleLine 1004 val f3_hit_0 = io.toIbuffer.fire && f3_perf_info.bank_hit(0) 1005 val f3_hit_1 = io.toIbuffer.fire && f3_doubleLine & f3_perf_info.bank_hit(1) 1006 val f3_hit = f3_perf_info.hit 1007 val perfEvents = Seq( 1008 ("frontendFlush ", wb_redirect ), 1009 ("ifu_req ", io.toIbuffer.fire ), 1010 ("ifu_miss ", io.toIbuffer.fire && !f3_perf_info.hit ), 1011 ("ifu_req_cacheline_0 ", f3_req_0 ), 1012 ("ifu_req_cacheline_1 ", f3_req_1 ), 1013 ("ifu_req_cacheline_0_hit ", f3_hit_1 ), 1014 ("ifu_req_cacheline_1_hit ", f3_hit_1 ), 1015 ("only_0_hit ", f3_perf_info.only_0_hit && io.toIbuffer.fire ), 1016 ("only_0_miss ", f3_perf_info.only_0_miss && io.toIbuffer.fire ), 1017 ("hit_0_hit_1 ", f3_perf_info.hit_0_hit_1 && io.toIbuffer.fire ), 1018 ("hit_0_miss_1 ", f3_perf_info.hit_0_miss_1 && io.toIbuffer.fire ), 1019 ("miss_0_hit_1 ", f3_perf_info.miss_0_hit_1 && io.toIbuffer.fire ), 1020 ("miss_0_miss_1 ", f3_perf_info.miss_0_miss_1 && io.toIbuffer.fire ), 1021 ) 1022 generatePerfEvent() 1023 1024 XSPerfAccumulate("ifu_req", io.toIbuffer.fire ) 1025 XSPerfAccumulate("ifu_miss", io.toIbuffer.fire && !f3_hit ) 1026 XSPerfAccumulate("ifu_req_cacheline_0", f3_req_0 ) 1027 XSPerfAccumulate("ifu_req_cacheline_1", f3_req_1 ) 1028 XSPerfAccumulate("ifu_req_cacheline_0_hit", f3_hit_0 ) 1029 XSPerfAccumulate("ifu_req_cacheline_1_hit", f3_hit_1 ) 1030 XSPerfAccumulate("frontendFlush", wb_redirect ) 1031 XSPerfAccumulate("only_0_hit", f3_perf_info.only_0_hit && io.toIbuffer.fire ) 1032 XSPerfAccumulate("only_0_miss", f3_perf_info.only_0_miss && io.toIbuffer.fire ) 1033 XSPerfAccumulate("hit_0_hit_1", f3_perf_info.hit_0_hit_1 && io.toIbuffer.fire ) 1034 XSPerfAccumulate("hit_0_miss_1", f3_perf_info.hit_0_miss_1 && io.toIbuffer.fire ) 1035 XSPerfAccumulate("miss_0_hit_1", f3_perf_info.miss_0_hit_1 && io.toIbuffer.fire ) 1036 XSPerfAccumulate("miss_0_miss_1", f3_perf_info.miss_0_miss_1 && io.toIbuffer.fire ) 1037 XSPerfAccumulate("hit_0_except_1", f3_perf_info.hit_0_except_1 && io.toIbuffer.fire ) 1038 XSPerfAccumulate("miss_0_except_1", f3_perf_info.miss_0_except_1 && io.toIbuffer.fire ) 1039 XSPerfAccumulate("except_0", f3_perf_info.except_0 && io.toIbuffer.fire ) 1040 XSPerfHistogram("ifu2ibuffer_validCnt", PopCount(io.toIbuffer.bits.valid & io.toIbuffer.bits.enqEnable), io.toIbuffer.fire, 0, PredictWidth + 1, 1) 1041 1042 val hartId = p(XSCoreParamsKey).HartId 1043 val isWriteFetchToIBufferTable = Constantin.createRecord(s"isWriteFetchToIBufferTable$hartId") 1044 val isWriteIfuWbToFtqTable = Constantin.createRecord(s"isWriteIfuWbToFtqTable$hartId") 1045 val fetchToIBufferTable = ChiselDB.createTable(s"FetchToIBuffer$hartId", new FetchToIBufferDB) 1046 val ifuWbToFtqTable = ChiselDB.createTable(s"IfuWbToFtq$hartId", new IfuWbToFtqDB) 1047 1048 val fetchIBufferDumpData = Wire(new FetchToIBufferDB) 1049 fetchIBufferDumpData.start_addr := 
  val fetchIBufferDumpData = Wire(new FetchToIBufferDB)
  fetchIBufferDumpData.start_addr   := f3_ftq_req.startAddr
  fetchIBufferDumpData.instr_count  := PopCount(io.toIbuffer.bits.enqEnable)
  fetchIBufferDumpData.exception    := (f3_perf_info.except_0 && io.toIbuffer.fire) || (f3_perf_info.hit_0_except_1 && io.toIbuffer.fire) || (f3_perf_info.miss_0_except_1 && io.toIbuffer.fire)
  fetchIBufferDumpData.is_cache_hit := f3_hit

  val ifuWbToFtqDumpData = Wire(new IfuWbToFtqDB)
  ifuWbToFtqDumpData.start_addr        := wb_ftq_req.startAddr
  ifuWbToFtqDumpData.is_miss_pred      := checkFlushWb.bits.misOffset.valid
  ifuWbToFtqDumpData.miss_pred_offset  := checkFlushWb.bits.misOffset.bits
  ifuWbToFtqDumpData.checkJalFault     := checkJalFault
  ifuWbToFtqDumpData.checkRetFault     := checkRetFault
  ifuWbToFtqDumpData.checkTargetFault  := checkTargetFault
  ifuWbToFtqDumpData.checkNotCFIFault  := checkNotCFIFault
  ifuWbToFtqDumpData.checkInvalidTaken := checkInvalidTaken

  fetchToIBufferTable.log(
    data  = fetchIBufferDumpData,
    en    = isWriteFetchToIBufferTable.orR && io.toIbuffer.fire,
    site  = "IFU" + p(XSCoreParamsKey).HartId.toString,
    clock = clock,
    reset = reset
  )
  ifuWbToFtqTable.log(
    data  = ifuWbToFtqDumpData,
    en    = isWriteIfuWbToFtqTable.orR && checkFlushWb.valid,
    site  = "IFU" + p(XSCoreParamsKey).HartId.toString,
    clock = clock,
    reset = reset
  )

}