/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.frontend

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.RVCDecoder
import xiangshan._
import xiangshan.cache.mmu._
import xiangshan.frontend.icache._
import utils._
import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle}

trait HasInstrMMIOConst extends HasXSParameter with HasIFUConst {
  def mmioBusWidth = 64
  def mmioBusBytes = mmioBusWidth / 8
  def maxInstrLen  = 32
}

trait HasIFUConst extends HasXSParameter {
  def addrAlign(addr: UInt, bytes: Int, highest: Int): UInt =
    Cat(addr(highest - 1, log2Ceil(bytes)), 0.U(log2Ceil(bytes).W))
  def fetchQueueSize = 2

  def getBasicBlockIdx(pc: UInt, start: UInt): UInt = {
    val byteOffset = pc - start
    (byteOffset - instBytes.U)(log2Ceil(PredictWidth), instOffsetBits)
  }
}

class IfuToFtqIO(implicit p: Parameters) extends XSBundle {
  val pdWb = Valid(new PredecodeWritebackBundle)
}

class FtqInterface(implicit p: Parameters) extends XSBundle {
  val fromFtq = Flipped(new FtqToIfuIO)
  val toFtq   = new IfuToFtqIO
}

class UncacheInterface(implicit p: Parameters) extends XSBundle {
  val fromUncache = Flipped(DecoupledIO(new InsUncacheResp))
  val toUncache   = DecoupledIO(new InsUncacheReq)
}

class NewIFUIO(implicit p: Parameters) extends XSBundle {
  val ftqInter         = new FtqInterface
  val icacheInter      = Vec(2, Flipped(new ICacheMainPipeBundle))
  val icacheStop       = Output(Bool())
  val icachePerfInfo   = Input(new ICachePerfInfo)
  val toIbuffer        = Decoupled(new FetchToIBuffer)
  val uncacheInter     = new UncacheInterface
  val frontendTrigger  = Flipped(new FrontendTdataDistributeIO)
  val csrTriggerEnable = Input(Vec(4, Bool()))
  val rob_commits      = Flipped(Vec(CommitWidth, Valid(new RobCommitInfo)))
}

// record the situation in which fallThruAddr falls into
// the middle of an RVI inst
class LastHalfInfo(implicit p: Parameters) extends XSBundle {
  val valid    = Bool()
  val middlePC = UInt(VAddrBits.W)
  def matchThisBlock(startAddr: UInt) = valid && middlePC === startAddr
}

class IfuToPreDecode(implicit p: Parameters) extends XSBundle {
  val data             = if (HasCExtension) Vec(PredictWidth + 1, UInt(16.W)) else Vec(PredictWidth, UInt(32.W))
  val frontendTrigger  = new FrontendTdataDistributeIO
  val csrTriggerEnable = Vec(4, Bool())
  val pc               = Vec(PredictWidth, UInt(VAddrBits.W))
}

class IfuToPredChecker(implicit p: Parameters) extends XSBundle {
  val ftqOffset  = Valid(UInt(log2Ceil(PredictWidth).W))
  val jumpOffset = Vec(PredictWidth, UInt(XLEN.W))
  val target     = UInt(VAddrBits.W)
  val instrRange = Vec(PredictWidth, Bool())
  val instrValid = Vec(PredictWidth, Bool())
  val pds        = Vec(PredictWidth, new PreDecodeInfo)
  val pc         = Vec(PredictWidth, UInt(VAddrBits.W))
}
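// A quick worked example for getBasicBlockIdx above, assuming the compressed
// configuration (instBytes = 2, instOffsetBits = 1): for pc = start + 2*k it
// computes ((2*k - 2) >> 1) = k - 1 (modulo PredictWidth), i.e. the slot index
// of the instruction just *before* pc. Stage 2 below uses this to turn the
// predicted fall-through address into the index of the last instruction that
// still belongs to the fetch packet.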
class NewIFU(implicit p: Parameters) extends XSModule
  with HasICacheParameters
  with HasIFUConst
  with HasPdConst
  with HasCircularQueuePtrHelper
  with HasPerfEvents
{
  println(s"icache ways: ${nWays} sets: ${nSets}")
  val io = IO(new NewIFUIO)
  val (toFtq,     fromFtq)     = (io.ftqInter.toFtq, io.ftqInter.fromFtq)
  val (toICache,  fromICache)  = (VecInit(io.icacheInter.map(_.req)), VecInit(io.icacheInter.map(_.resp)))
  val (toUncache, fromUncache) = (io.uncacheInter.toUncache, io.uncacheInter.fromUncache)

  def isCrossLineReq(start: UInt, end: UInt): Bool = start(blockOffBits) ^ end(blockOffBits)

  def isLastInCacheline(addr: UInt): Bool = addr(blockOffBits - 1, 1) === 0.U

  class TlbExept(implicit p: Parameters) extends XSBundle {
    val pageFault   = Bool()
    val accessFault = Bool()
    val mmio        = Bool()
  }

  val preDecoder      = Module(new PreDecode)
  val predChecker     = Module(new PredChecker)
  val frontendTrigger = Module(new FrontendTrigger)
  val (preDecoderIn, preDecoderOut) = (preDecoder.io.in, preDecoder.io.out)
  val (checkerIn,    checkerOut)    = (predChecker.io.in, predChecker.io.out)

  /**
    ******************************************************************************
    * IFU Stage 0
    * - send cacheline fetch request to ICacheMainPipe
    ******************************************************************************
    */

  val f0_valid      = fromFtq.req.valid
  val f0_ftq_req    = fromFtq.req.bits
  val f0_doubleLine = fromFtq.req.bits.crossCacheline
  val f0_vSetIdx    = VecInit(get_idx(f0_ftq_req.startAddr), get_idx(f0_ftq_req.nextlineStart))
  val f0_fire       = fromFtq.req.fire()

  val f0_flush, f1_flush, f2_flush, f3_flush = WireInit(false.B)
  val from_bpu_f0_flush, from_bpu_f1_flush, from_bpu_f2_flush, from_bpu_f3_flush = WireInit(false.B)

  from_bpu_f0_flush := fromFtq.flushFromBpu.shouldFlushByStage2(f0_ftq_req.ftqIdx)/* ||
                       fromFtq.flushFromBpu.shouldFlushByStage3(f0_ftq_req.ftqIdx) */

  val wb_redirect, mmio_redirect, backend_redirect = WireInit(false.B)
  val f3_wb_not_flush = WireInit(false.B)

  backend_redirect := fromFtq.redirect.valid
  f3_flush := backend_redirect || (wb_redirect && !f3_wb_not_flush)
  f2_flush := backend_redirect || mmio_redirect || wb_redirect
  f1_flush := f2_flush || from_bpu_f1_flush
  f0_flush := f1_flush || from_bpu_f0_flush

  val f1_ready, f2_ready, f3_ready = WireInit(false.B)

  fromFtq.req.ready := toICache(0).ready && toICache(1).ready && f2_ready && GTimer() > 500.U

  toICache(0).valid      := fromFtq.req.valid && !f0_flush
  toICache(0).bits.vaddr := fromFtq.req.bits.startAddr
  toICache(1).valid      := fromFtq.req.valid && f0_doubleLine && !f0_flush
  toICache(1).bits.vaddr := fromFtq.req.bits.nextlineStart // fromFtq.req.bits.startAddr + (PredictWidth * 2).U //TODO: timing critical

  /** <PERF> f0 fetch bubble */

  XSPerfAccumulate("fetch_bubble_ftq_not_valid", !f0_valid)
  XSPerfAccumulate("fetch_bubble_pipe_stall",    f0_valid && toICache(0).ready && toICache(1).ready && !f1_ready)
  XSPerfAccumulate("fetch_bubble_sram_0_busy",   f0_valid && !toICache(0).ready)
  XSPerfAccumulate("fetch_bubble_sram_1_busy",   f0_valid && !toICache(1).ready)
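  // Note on the flush wiring above: flushes cascade from older to younger
  // stages (f2_flush feeds f1_flush, which feeds f0_flush), so a redirect at
  // any point also kills every younger in-flight request. A writeback redirect
  // spares f3 itself only when f3 currently holds the same FTQ entry that is
  // being written back (f3_wb_not_flush, driven down in the writeback stage).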
  /**
    ******************************************************************************
    * IFU Stage 1
    * - calculate pc/half_pc/cut_ptr for every instruction
    ******************************************************************************
    */

  val f1_valid      = RegInit(false.B)
  val f1_ftq_req    = RegEnable(next = f0_ftq_req,    enable = f0_fire)
  // val f1_situation  = RegEnable(next = f0_situation, enable = f0_fire)
  val f1_doubleLine = RegEnable(next = f0_doubleLine, enable = f0_fire)
  val f1_vSetIdx    = RegEnable(next = f0_vSetIdx,    enable = f0_fire)
  val f1_fire       = f1_valid && f1_ready

  f1_ready := f2_ready || !f1_valid

  // from_bpu_f1_flush := fromFtq.flushFromBpu.shouldFlushByStage3(f1_ftq_req.ftqIdx)
  from_bpu_f1_flush := false.B

  when(f1_flush)                  { f1_valid := false.B }
  .elsewhen(f0_fire && !f0_flush) { f1_valid := true.B }
  .elsewhen(f1_fire)              { f1_valid := false.B }

  val f1_pc        = VecInit((0 until PredictWidth).map(i => f1_ftq_req.startAddr + (i * 2).U))
  val f1_half_snpc = VecInit((0 until PredictWidth).map(i => f1_ftq_req.startAddr + ((i + 2) * 2).U))
  val f1_cut_ptr   = if (HasCExtension) VecInit((0 until PredictWidth + 1).map(i => Cat(0.U(1.W), f1_ftq_req.startAddr(blockOffBits - 1, 1)) + i.U))
                     else               VecInit((0 until PredictWidth).map(i => Cat(0.U(1.W), f1_ftq_req.startAddr(blockOffBits - 1, 2)) + i.U))

  /**
    ******************************************************************************
    * IFU Stage 2
    * - icache response data (latched for pipeline stop)
    * - generate exception bits for every instruction (page fault/access fault/mmio)
    * - generate predicted instruction range (1 means this instruction is in this fetch packet)
    * - cut data from cachelines into packet instruction code
    * - instruction predecode and RVC expand
    ******************************************************************************
    */

  val icacheRespAllValid = WireInit(false.B)

  val f2_valid      = RegInit(false.B)
  val f2_ftq_req    = RegEnable(next = f1_ftq_req,    enable = f1_fire)
  // val f2_situation  = RegEnable(next = f1_situation, enable = f1_fire)
  val f2_doubleLine = RegEnable(next = f1_doubleLine, enable = f1_fire)
  val f2_vSetIdx    = RegEnable(next = f1_vSetIdx,    enable = f1_fire)
  val f2_fire       = f2_valid && f2_ready

  f2_ready := f3_ready && icacheRespAllValid || !f2_valid
  //TODO: addr compare may be timing critical
  val f2_icache_all_resp_wire = fromICache(0).valid && (fromICache(0).bits.vaddr === f2_ftq_req.startAddr) &&
                                ((fromICache(1).valid && (fromICache(1).bits.vaddr === f2_ftq_req.nextlineStart)) || !f2_doubleLine)
  val f2_icache_all_resp_reg  = RegInit(false.B)

  icacheRespAllValid := f2_icache_all_resp_reg || f2_icache_all_resp_wire

  io.icacheStop := !f3_ready

  when(f2_flush)                                              { f2_icache_all_resp_reg := false.B }
  .elsewhen(f2_valid && f2_icache_all_resp_wire && !f3_ready) { f2_icache_all_resp_reg := true.B }
  .elsewhen(f2_fire && f2_icache_all_resp_reg)                { f2_icache_all_resp_reg := false.B }

  when(f2_flush)                  { f2_valid := false.B }
  .elsewhen(f1_fire && !f1_flush) { f2_valid := true.B }
  .elsewhen(f2_fire)              { f2_valid := false.B }

  val f2_cache_response_data = ResultHoldBypass(valid = f2_icache_all_resp_wire, data = VecInit(fromICache.map(_.bits.readData)))

  val f2_except_pf = VecInit((0 until PortNumber).map(i => fromICache(i).bits.tlbExcp.pageFault))
  val f2_except_af = VecInit((0 until PortNumber).map(i => fromICache(i).bits.tlbExcp.accessFault))
  val f2_mmio      = fromICache(0).bits.tlbExcp.mmio && !fromICache(0).bits.tlbExcp.accessFault &&
                     !fromICache(0).bits.tlbExcp.pageFault
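  // The exception bits above are per icache port, i.e. per cacheline; they are
  // widened into the per-instruction vectors f2_pf_vec/f2_af_vec below by
  // checking which of the two cachelines each PC falls into (isNextLine).
  // Also note f2_mmio is only raised when the access carries neither a page
  // fault nor an access fault, so the stage-3 MMIO state machine never starts
  // for a faulting fetch.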
  val f2_pc        = RegEnable(next = f1_pc,        enable = f1_fire)
  val f2_half_snpc = RegEnable(next = f1_half_snpc, enable = f1_fire)
  val f2_cut_ptr   = RegEnable(next = f1_cut_ptr,   enable = f1_fire)

  def isNextLine(pc: UInt, startAddr: UInt) = {
    startAddr(blockOffBits) ^ pc(blockOffBits)
  }

  def isLastInLine(pc: UInt) = {
    pc(blockOffBits - 1, 0) === "b111110".U
  }

  val f2_foldpc      = VecInit(f2_pc.map(i => XORFold(i(VAddrBits - 1, 1), MemPredPCWidth)))
  val f2_jump_range  = Fill(PredictWidth, !f2_ftq_req.ftqOffset.valid) | Fill(PredictWidth, 1.U(1.W)) >> ~f2_ftq_req.ftqOffset.bits
  val f2_ftr_range   = Fill(PredictWidth, f2_ftq_req.oversize || f2_ftq_req.ftqOffset.valid) | Fill(PredictWidth, 1.U(1.W)) >> ~getBasicBlockIdx(f2_ftq_req.nextStartAddr, f2_ftq_req.startAddr)
  val f2_instr_range = f2_jump_range & f2_ftr_range
  val f2_pf_vec = VecInit((0 until PredictWidth).map(i => !isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_except_pf(0) || isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_doubleLine && f2_except_pf(1)))
  val f2_af_vec = VecInit((0 until PredictWidth).map(i => !isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_except_af(0) || isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_doubleLine && f2_except_af(1)))

  val f2_paddrs    = VecInit((0 until PortNumber).map(i => fromICache(i).bits.paddr))
  val f2_perf_info = io.icachePerfInfo

  def cut(cacheline: UInt, cutPtr: Vec[UInt]): Vec[UInt] = {
    if (HasCExtension) {
      val result  = Wire(Vec(PredictWidth + 1, UInt(16.W)))
      val dataVec = cacheline.asTypeOf(Vec(blockBytes * 2 / 2, UInt(16.W)))
      (0 until PredictWidth + 1).foreach(i =>
        result(i) := dataVec(cutPtr(i))
      )
      result
    } else {
      val result  = Wire(Vec(PredictWidth, UInt(32.W)))
      val dataVec = cacheline.asTypeOf(Vec(blockBytes * 2 / 4, UInt(32.W)))
      (0 until PredictWidth).foreach(i =>
        result(i) := dataVec(cutPtr(i))
      )
      result
    }
  }

  val f2_datas    = VecInit((0 until PortNumber).map(i => f2_cache_response_data(i)))
  val f2_cut_data = cut(Cat(f2_datas.map(cacheline => cacheline.asUInt).reverse).asUInt, f2_cut_ptr)

  /** predecode (include RVC expander) */
  preDecoderIn.data             := f2_cut_data
  preDecoderIn.frontendTrigger  := io.frontendTrigger
  preDecoderIn.csrTriggerEnable := io.csrTriggerEnable
  preDecoderIn.pc               := f2_pc

  val f2_expd_instr     = preDecoderOut.expInstr
  val f2_pd             = preDecoderOut.pd
  val f2_jump_offset    = preDecoderOut.jumpOffset
  val f2_hasHalfValid   = preDecoderOut.hasHalfValid
  val f2_crossPageFault = VecInit((0 until PredictWidth).map(i => isLastInLine(f2_pc(i)) && !f2_except_pf(0) && f2_doubleLine && f2_except_pf(1) && !f2_pd(i).isRVC))

  val predecodeOutValid = WireInit(false.B)
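  // A worked example for the range masks above, assuming PredictWidth = 16:
  // Fill(16, 1.U(1.W)) is 0xFFFF and, on the 4-bit offset, ~offset equals
  // 15 - offset, so "0xFFFF >> ~offset" leaves exactly the low (offset + 1)
  // bits set. With a predicted-taken branch at ftqOffset.bits = 3,
  // f2_jump_range becomes 0x000F: the branch slot and everything before it
  // stay in the packet. f2_ftr_range applies the same trick to the
  // fall-through index from getBasicBlockIdx, and f2_instr_range is the
  // intersection of the two.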
  /**
    ******************************************************************************
    * IFU Stage 3
    * - handle MMIO instruction
    *      - send request to Uncache fetch Unit
    *      - every packet includes 1 MMIO instruction
    *      - MMIO instructions will stop the fetch pipeline until committing from RoB
    *      - flush to snpc (send ifu_redirect to Ftq)
    * - Ibuffer enqueue
    * - check predict result in Frontend (jalFault/retFault/notCFIFault/invalidTakenFault/targetFault)
    * - handle last half RVI instruction
    ******************************************************************************
    */

  val f3_valid   = RegInit(false.B)
  val f3_ftq_req = RegEnable(next = f2_ftq_req, enable = f2_fire)
  // val f3_situation = RegEnable(next = f2_situation, enable = f2_fire)
  val f3_doubleLine = RegEnable(next = f2_doubleLine, enable = f2_fire)
  val f3_fire       = io.toIbuffer.fire()

  f3_ready := io.toIbuffer.ready || !f3_valid

  val f3_cut_data = RegEnable(next = f2_cut_data, enable = f2_fire)

  val f3_except_pf = RegEnable(next = f2_except_pf, enable = f2_fire)
  val f3_except_af = RegEnable(next = f2_except_af, enable = f2_fire)
  val f3_mmio      = RegEnable(next = f2_mmio,      enable = f2_fire)

  val f3_expd_instr     = RegEnable(next = f2_expd_instr,     enable = f2_fire)
  val f3_pd             = RegEnable(next = f2_pd,             enable = f2_fire)
  val f3_jump_offset    = RegEnable(next = f2_jump_offset,    enable = f2_fire)
  val f3_af_vec         = RegEnable(next = f2_af_vec,         enable = f2_fire)
  val f3_pf_vec         = RegEnable(next = f2_pf_vec,         enable = f2_fire)
  val f3_pc             = RegEnable(next = f2_pc,             enable = f2_fire)
  val f3_half_snpc      = RegEnable(next = f2_half_snpc,      enable = f2_fire)
  val f3_instr_range    = RegEnable(next = f2_instr_range,    enable = f2_fire)
  val f3_foldpc         = RegEnable(next = f2_foldpc,         enable = f2_fire)
  val f3_crossPageFault = RegEnable(next = f2_crossPageFault, enable = f2_fire)
  val f3_hasHalfValid   = RegEnable(next = f2_hasHalfValid,   enable = f2_fire)
  val f3_except     = VecInit((0 until 2).map(i => f3_except_pf(i) || f3_except_af(i)))
  val f3_has_except = f3_valid && (f3_except_af.reduce(_ || _) || f3_except_pf.reduce(_ || _))
  val f3_pAddrs     = RegEnable(next = f2_paddrs, enable = f2_fire)

  val f3_oversize_target = f3_pc.last + 2.U

  /*** MMIO State Machine ***/
  val f3_mmio_data = Reg(UInt(maxInstrLen.W))

  val mmio_idle :: mmio_send_req :: mmio_w_resp :: mmio_resend :: mmio_resend_w_resp :: mmio_wait_commit :: mmio_commited :: Nil = Enum(7)
  val mmio_state = RegInit(mmio_idle)

  val f3_req_is_mmio     = f3_mmio && f3_valid
  val mmio_commit        = VecInit(io.rob_commits.map{commit => commit.valid && commit.bits.ftqIdx === f3_ftq_req.ftqIdx && commit.bits.ftqOffset === 0.U}).asUInt.orR
  val f3_mmio_req_commit = f3_req_is_mmio && mmio_state === mmio_commited

  val f3_mmio_to_commit      = f3_req_is_mmio && mmio_state === mmio_wait_commit
  val f3_mmio_to_commit_next = RegNext(f3_mmio_to_commit)
  val f3_mmio_can_go         = f3_mmio_to_commit && !f3_mmio_to_commit_next

  val f3_ftq_flush_self     = fromFtq.redirect.valid && RedirectLevel.flushItself(fromFtq.redirect.bits.level)
  val f3_ftq_flush_by_older = fromFtq.redirect.valid && isBefore(fromFtq.redirect.bits.ftqIdx, f3_ftq_req.ftqIdx)

  val f3_need_not_flush = f3_req_is_mmio && fromFtq.redirect.valid && !f3_ftq_flush_self && !f3_ftq_flush_by_older

  when(f3_flush && !f3_need_not_flush)              { f3_valid := false.B }
  .elsewhen(f2_fire && !f2_flush)                   { f3_valid := true.B }
  .elsewhen(io.toIbuffer.fire() && !f3_req_is_mmio) { f3_valid := false.B }
  .elsewhen(f3_req_is_mmio && f3_mmio_req_commit)   { f3_valid := false.B }

  val f3_mmio_use_seq_pc = RegInit(false.B)

  val (redirect_ftqIdx, redirect_ftqOffset) = (fromFtq.redirect.bits.ftqIdx, fromFtq.redirect.bits.ftqOffset)
  val redirect_mmio_req = fromFtq.redirect.valid && redirect_ftqIdx === f3_ftq_req.ftqIdx && redirect_ftqOffset === 0.U

  when(RegNext(f2_fire && !f2_flush) && f3_req_is_mmio) { f3_mmio_use_seq_pc := true.B }
  .elsewhen(redirect_mmio_req)                          { f3_mmio_use_seq_pc := false.B }

  f3_ready := Mux(f3_req_is_mmio, io.toIbuffer.ready && f3_mmio_req_commit || !f3_valid,
                                  io.toIbuffer.ready || !f3_valid)
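  // MMIO fetch flow, as implemented by the state machine below:
  //   idle -> send_req -> w_resp -> [resend -> resend_w_resp ->] wait_commit -> commited
  // w_resp inspects the low two bits of the returned beat to decide whether a
  // second fetch at paddr + 2 is required before the data is complete. The
  // pipeline then holds in wait_commit until the RoB commits the instruction
  // (mmio_commit), matching the stage-3 note that MMIO stops the fetch
  // pipeline until commit.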
  when(fromUncache.fire()) { f3_mmio_data := fromUncache.bits.data }

  switch(mmio_state) {
    is(mmio_idle) {
      when(f3_req_is_mmio) {
        mmio_state := mmio_send_req
      }
    }

    is(mmio_send_req) {
      mmio_state := Mux(toUncache.fire(), mmio_w_resp, mmio_send_req)
    }

    is(mmio_w_resp) {
      when(fromUncache.fire()) {
        val isRVC = fromUncache.bits.data(1, 0) =/= 3.U
        mmio_state := Mux(isRVC, mmio_resend, mmio_wait_commit)
      }
    }

    is(mmio_resend) {
      mmio_state := Mux(toUncache.fire(), mmio_resend_w_resp, mmio_resend)
    }

    is(mmio_resend_w_resp) {
      when(fromUncache.fire()) {
        mmio_state := mmio_wait_commit
      }
    }

    is(mmio_wait_commit) {
      when(mmio_commit) {
        mmio_state := mmio_commited
      }
    }

    is(mmio_commited) {
      mmio_state := mmio_idle
    }
  }

  when(f3_ftq_flush_self || f3_ftq_flush_by_older) {
    mmio_state   := mmio_idle
    f3_mmio_data := 0.U
  }

  toUncache.valid     := ((mmio_state === mmio_send_req) || (mmio_state === mmio_resend)) && f3_req_is_mmio
  toUncache.bits.addr := Mux(mmio_state === mmio_resend, f3_pAddrs(0) + 2.U, f3_pAddrs(0))
  fromUncache.ready   := true.B

  val f3_lastHalf = RegInit(0.U.asTypeOf(new LastHalfInfo))

  val f3_predecode_range = VecInit(preDecoderOut.pd.map(inst => inst.valid)).asUInt
  val f3_mmio_range      = VecInit((0 until PredictWidth).map(i => if (i == 0) true.B else false.B))
  val f3_instr_valid     = Wire(Vec(PredictWidth, Bool()))

  /*** prediction result check ***/
  checkerIn.ftqOffset  := f3_ftq_req.ftqOffset
  checkerIn.jumpOffset := f3_jump_offset
  checkerIn.target     := f3_ftq_req.nextStartAddr
  checkerIn.instrRange := f3_instr_range.asTypeOf(Vec(PredictWidth, Bool()))
  checkerIn.instrValid := f3_instr_valid.asTypeOf(Vec(PredictWidth, Bool()))
  checkerIn.pds        := f3_pd
  checkerIn.pc         := f3_pc

  /*** handle half RVI in the last 2 Bytes ***/

  def hasLastHalf(idx: UInt) = {
    !f3_pd(idx).isRVC && checkerOut.fixedRange(idx) && f3_instr_valid(idx) && !checkerOut.fixedTaken(idx) && !checkerOut.fixedMissPred(idx) && !f3_req_is_mmio && !f3_ftq_req.oversize
  }
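  // Last-half handling in a nutshell: hasLastHalf flags a block whose final
  // 2 bytes start an RVI instruction that is neither taken nor mispredicted.
  // That block still issues the complete instruction (the stage-2 cut window
  // carries PredictWidth + 1 half-words, giving the predecoder the upper 16
  // bits); f3_lastHalf below then remembers the next block's start address,
  // and while it is set the predecoder's hasHalfValid vector replaces the
  // normal valid bits, so slot 0 of that next block, which is only the stale
  // second half, is not treated as an instruction start.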
  val f3_last_validIdx = ~ParallelPriorityEncoder(checkerOut.fixedRange.reverse)

  val f3_hasLastHalf    = hasLastHalf((PredictWidth - 1).U)
  val f3_false_lastHalf = hasLastHalf(f3_last_validIdx)
  val f3_false_snpc     = f3_half_snpc(f3_last_validIdx)

  val f3_lastHalf_mask = VecInit((0 until PredictWidth).map(i => if (i == 0) false.B else true.B)).asUInt()

  when(f3_flush) {
    f3_lastHalf.valid := false.B
  }.elsewhen(f3_fire) {
    f3_lastHalf.valid    := f3_hasLastHalf
    f3_lastHalf.middlePC := f3_ftq_req.nextStartAddr
  }

  f3_instr_valid := Mux(f3_lastHalf.valid, f3_hasHalfValid, VecInit(f3_pd.map(inst => inst.valid)))

  /*** frontend Trigger ***/
  frontendTrigger.io.pds  := f3_pd
  frontendTrigger.io.pc   := f3_pc
  frontendTrigger.io.data := f3_cut_data

  frontendTrigger.io.frontendTrigger  := io.frontendTrigger
  frontendTrigger.io.csrTriggerEnable := io.csrTriggerEnable

  val f3_triggered = frontendTrigger.io.triggered

  /*** send to Ibuffer ***/

  io.toIbuffer.valid          := f3_valid && (!f3_req_is_mmio || f3_mmio_can_go) && !f3_flush
  io.toIbuffer.bits.instrs    := f3_expd_instr
  io.toIbuffer.bits.valid     := f3_instr_valid.asUInt
  io.toIbuffer.bits.enqEnable := checkerOut.fixedRange.asUInt & f3_instr_valid.asUInt
  io.toIbuffer.bits.pd        := f3_pd
  io.toIbuffer.bits.ftqPtr    := f3_ftq_req.ftqIdx
  io.toIbuffer.bits.pc        := f3_pc
  io.toIbuffer.bits.ftqOffset.zipWithIndex.foreach{case (a, i) => a.bits := i.U; a.valid := checkerOut.fixedTaken(i) && !f3_req_is_mmio}
  io.toIbuffer.bits.foldpc    := f3_foldpc
  io.toIbuffer.bits.ipf       := f3_pf_vec
  io.toIbuffer.bits.acf       := f3_af_vec
  io.toIbuffer.bits.crossPageIPFFix := f3_crossPageFault
  io.toIbuffer.bits.triggered := f3_triggered

  val lastHalfMask = VecInit((0 until PredictWidth).map(i => if (i == 0) false.B else true.B))
  when(f3_lastHalf.valid) {
    io.toIbuffer.bits.enqEnable := checkerOut.fixedRange.asUInt & f3_instr_valid.asUInt & lastHalfMask.asUInt
    io.toIbuffer.bits.valid     := f3_lastHalf_mask & f3_instr_valid.asUInt
  }

  /** external predecode for MMIO instruction */
  when(f3_req_is_mmio) {
    val inst = Cat(f3_mmio_data(31, 16), f3_mmio_data(15, 0))
    val currentIsRVC = isRVC(inst)

    val brType :: isCall :: isRet :: Nil = brInfo(inst)
    val jalOffset = jal_offset(inst, currentIsRVC)
    val brOffset  = br_offset(inst, currentIsRVC)

    io.toIbuffer.bits.instrs(0) := new RVCDecoder(inst, XLEN).decode.bits

    io.toIbuffer.bits.pd(0).valid  := true.B
    io.toIbuffer.bits.pd(0).isRVC  := currentIsRVC
    io.toIbuffer.bits.pd(0).brType := brType
    io.toIbuffer.bits.pd(0).isCall := isCall
    io.toIbuffer.bits.pd(0).isRet  := isRet

    io.toIbuffer.bits.enqEnable := f3_mmio_range.asUInt
  }
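  // Enqueue masking, summarized: enqEnable = fixedRange (the packet range
  // re-validated by the checker) AND instr_valid (predecoded instruction
  // starts). Under a pending last half, lastHalfMask additionally clears
  // slot 0 so the leftover half of the previous block's RVI is never enqueued
  // on its own; an MMIO packet instead enqueues exactly its slot-0 entry.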
  // Write back to Ftq
  val f3_cache_fetch = f3_valid && !(f2_fire && !f2_flush)
  val finishFetchMaskReg = RegNext(f3_cache_fetch)

  val mmioFlushWb = Wire(Valid(new PredecodeWritebackBundle))
  val f3_mmio_missOffset = Wire(ValidUndirectioned(UInt(log2Ceil(PredictWidth).W)))
  f3_mmio_missOffset.valid := f3_req_is_mmio
  f3_mmio_missOffset.bits  := 0.U

  mmioFlushWb.valid   := (f3_req_is_mmio && mmio_state === mmio_wait_commit && RegNext(fromUncache.fire()) && f3_mmio_use_seq_pc)
  mmioFlushWb.bits.pc := f3_pc
  mmioFlushWb.bits.pd := f3_pd
  mmioFlushWb.bits.pd.zipWithIndex.foreach{case (instr, i) => instr.valid := f3_mmio_range(i)}
  mmioFlushWb.bits.ftqIdx     := f3_ftq_req.ftqIdx
  mmioFlushWb.bits.ftqOffset  := f3_ftq_req.ftqOffset.bits
  mmioFlushWb.bits.misOffset  := f3_mmio_missOffset
  mmioFlushWb.bits.cfiOffset  := DontCare
  mmioFlushWb.bits.target     := Mux(f3_mmio_data(1, 0) =/= 3.U, f3_ftq_req.startAddr + 2.U, f3_ftq_req.startAddr + 4.U)
  mmioFlushWb.bits.jalTarget  := DontCare
  mmioFlushWb.bits.instrRange := f3_mmio_range

  mmio_redirect := (f3_req_is_mmio && mmio_state === mmio_wait_commit && RegNext(fromUncache.fire()) && f3_mmio_use_seq_pc)

  /**
    ******************************************************************************
    * IFU Write Back Stage
    * - write back predecode information to the Ftq for update
    * - redirect if a fault prediction is found
    * - redirect on a false last-half hit (the last PC is not start + 32 bytes
    *   but falls in the middle of a notCFI RVI instruction)
    ******************************************************************************
    */

  val wb_valid   = RegNext(RegNext(f2_fire && !f2_flush) && !f3_req_is_mmio && !f3_flush)
  val wb_ftq_req = RegNext(f3_ftq_req)

  val wb_check_result = RegNext(checkerOut)
  val wb_instr_range  = RegNext(io.toIbuffer.bits.enqEnable)
  val wb_pc           = RegNext(f3_pc)
  val wb_pd           = RegNext(f3_pd)
  val wb_instr_valid  = RegNext(f3_instr_valid)

  /* false hit lastHalf */
  val wb_lastIdx        = RegNext(f3_last_validIdx)
  val wb_false_lastHalf = RegNext(f3_false_lastHalf) && wb_lastIdx =/= (PredictWidth - 1).U
  val wb_false_target   = RegNext(f3_false_snpc)

  val wb_half_flush  = wb_false_lastHalf
  val wb_half_target = wb_false_target

  /* false oversize */
  val lastIsRVC = wb_instr_range.asTypeOf(Vec(PredictWidth, Bool())).last && wb_pd.last.isRVC
  val lastIsRVI = wb_instr_range.asTypeOf(Vec(PredictWidth, Bool()))(PredictWidth - 2) && !wb_pd(PredictWidth - 2).isRVC
  val lastTaken = wb_check_result.fixedTaken.last
  val wb_false_oversize  = wb_valid && wb_ftq_req.oversize && (lastIsRVC || lastIsRVI) && !lastTaken
  val wb_oversize_target = RegNext(f3_oversize_target)

  when(wb_valid) {
    assert(!wb_false_oversize || !wb_half_flush, "False oversize and false half should be exclusive.")
  }

  f3_wb_not_flush := wb_ftq_req.ftqIdx === f3_ftq_req.ftqIdx && f3_valid && wb_valid

  val checkFlushWb = Wire(Valid(new PredecodeWritebackBundle))
  checkFlushWb.valid   := wb_valid
  checkFlushWb.bits.pc := wb_pc
  checkFlushWb.bits.pd := wb_pd
  checkFlushWb.bits.pd.zipWithIndex.foreach{case (instr, i) => instr.valid := wb_instr_valid(i)}
  checkFlushWb.bits.ftqIdx    := wb_ftq_req.ftqIdx
  checkFlushWb.bits.ftqOffset := wb_ftq_req.ftqOffset.bits
  checkFlushWb.bits.misOffset.valid := ParallelOR(wb_check_result.fixedMissPred) || wb_half_flush || wb_false_oversize
  checkFlushWb.bits.misOffset.bits  := Mux(wb_half_flush, (PredictWidth - 1).U, ParallelPriorityEncoder(wb_check_result.fixedMissPred))
  checkFlushWb.bits.cfiOffset.valid := ParallelOR(wb_check_result.fixedTaken)
  checkFlushWb.bits.cfiOffset.bits  := ParallelPriorityEncoder(wb_check_result.fixedTaken)
  checkFlushWb.bits.target    := Mux(wb_false_oversize, wb_oversize_target,
                                 Mux(wb_half_flush, wb_half_target, wb_check_result.fixedTarget(ParallelPriorityEncoder(wb_check_result.fixedMissPred))))
  checkFlushWb.bits.jalTarget := wb_check_result.fixedTarget(ParallelPriorityEncoder(VecInit(wb_pd.zip(wb_instr_valid).map{case (pd, v) => v && pd.isJal})))
  checkFlushWb.bits.instrRange := wb_instr_range.asTypeOf(Vec(PredictWidth, Bool()))

  toFtq.pdWb := Mux(f3_req_is_mmio, mmioFlushWb, checkFlushWb)

  wb_redirect := checkFlushWb.bits.misOffset.valid && wb_valid
  /** performance counter */
  val f3_perf_info = RegEnable(next = f2_perf_info, enable = f2_fire)
  val f3_req_0 = io.toIbuffer.fire()
  val f3_req_1 = io.toIbuffer.fire() && f3_doubleLine
  val f3_hit_0 = io.toIbuffer.fire() && f3_perf_info.bank_hit(0)
  val f3_hit_1 = io.toIbuffer.fire() && f3_doubleLine && f3_perf_info.bank_hit(1)
  val f3_hit   = f3_perf_info.hit
  val perfEvents = Seq(
    ("frontendFlush           ", wb_redirect                                      ),
    ("ifu_req                 ", io.toIbuffer.fire()                              ),
    ("ifu_miss                ", io.toIbuffer.fire() && !f3_perf_info.hit         ),
    ("ifu_req_cacheline_0     ", f3_req_0                                         ),
    ("ifu_req_cacheline_1     ", f3_req_1                                         ),
    ("ifu_req_cacheline_0_hit ", f3_hit_0                                         ),
    ("ifu_req_cacheline_1_hit ", f3_hit_1                                         ),
    ("only_0_hit              ", f3_perf_info.only_0_hit   && io.toIbuffer.fire() ),
    ("only_0_miss             ", f3_perf_info.only_0_miss  && io.toIbuffer.fire() ),
    ("hit_0_hit_1             ", f3_perf_info.hit_0_hit_1  && io.toIbuffer.fire() ),
    ("hit_0_miss_1            ", f3_perf_info.hit_0_miss_1 && io.toIbuffer.fire() ),
    ("miss_0_hit_1            ", f3_perf_info.miss_0_hit_1 && io.toIbuffer.fire() ),
    ("miss_0_miss_1           ", f3_perf_info.miss_0_miss_1 && io.toIbuffer.fire()),
    // ("cross_line_block             ", io.toIbuffer.fire() && f3_situation(0)   ),
    // ("fall_through_is_cacheline_end", io.toIbuffer.fire() && f3_situation(1)   ),
  )
  generatePerfEvent()

  XSPerfAccumulate("ifu_req",                 io.toIbuffer.fire())
  XSPerfAccumulate("ifu_miss",                io.toIbuffer.fire() && !f3_hit)
  XSPerfAccumulate("ifu_req_cacheline_0",     f3_req_0)
  XSPerfAccumulate("ifu_req_cacheline_1",     f3_req_1)
  XSPerfAccumulate("ifu_req_cacheline_0_hit", f3_hit_0)
  XSPerfAccumulate("ifu_req_cacheline_1_hit", f3_hit_1)
  XSPerfAccumulate("frontendFlush",           wb_redirect)
  XSPerfAccumulate("only_0_hit",    f3_perf_info.only_0_hit    && io.toIbuffer.fire())
  XSPerfAccumulate("only_0_miss",   f3_perf_info.only_0_miss   && io.toIbuffer.fire())
  XSPerfAccumulate("hit_0_hit_1",   f3_perf_info.hit_0_hit_1   && io.toIbuffer.fire())
  XSPerfAccumulate("hit_0_miss_1",  f3_perf_info.hit_0_miss_1  && io.toIbuffer.fire())
  XSPerfAccumulate("miss_0_hit_1",  f3_perf_info.miss_0_hit_1  && io.toIbuffer.fire())
  XSPerfAccumulate("miss_0_miss_1", f3_perf_info.miss_0_miss_1 && io.toIbuffer.fire())
  // XSPerfAccumulate("cross_line_block",              io.toIbuffer.fire() && f3_situation(0))
  // XSPerfAccumulate("fall_through_is_cacheline_end", io.toIbuffer.fire() && f3_situation(1))
}