/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.frontend

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import xiangshan._
import xiangshan.frontend.icache._
import xiangshan.backend.CtrlToFtqIO
import xiangshan.backend.decode.ImmUnion

class FtqPtr(implicit p: Parameters) extends CircularQueuePtr[FtqPtr](
  p => p(XSCoreParamsKey).FtqSize
){
}

object FtqPtr {
  def apply(f: Bool, v: UInt)(implicit p: Parameters): FtqPtr = {
    val ptr = Wire(new FtqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
  def inverse(ptr: FtqPtr)(implicit p: Parameters): FtqPtr = {
    apply(!ptr.flag, ptr.value)
  }
}

class FtqNRSRAM[T <: Data](gen: T, numRead: Int)(implicit p: Parameters) extends XSModule {

  val io = IO(new Bundle() {
    val raddr = Input(Vec(numRead, UInt(log2Up(FtqSize).W)))
    val ren = Input(Vec(numRead, Bool()))
    val rdata = Output(Vec(numRead, gen))
    val waddr = Input(UInt(log2Up(FtqSize).W))
    val wen = Input(Bool())
    val wdata = Input(gen)
  })

  for(i <- 0 until numRead){
    val sram = Module(new SRAMTemplate(gen, FtqSize))
    sram.io.r.req.valid := io.ren(i)
    sram.io.r.req.bits.setIdx := io.raddr(i)
    io.rdata(i) := sram.io.r.resp.data(0)
    sram.io.w.req.valid := io.wen
    sram.io.w.req.bits.setIdx := io.waddr
    sram.io.w.req.bits.data := VecInit(io.wdata)
  }

}

class Ftq_RF_Components(implicit p: Parameters) extends XSBundle with BPUUtils {
  val startAddr = UInt(VAddrBits.W)
  val nextLineAddr = UInt(VAddrBits.W)
  val isNextMask = Vec(PredictWidth, Bool())
  val fallThruError = Bool()
  // val carry = Bool()
  def getPc(offset: UInt) = {
    def getHigher(pc: UInt) = pc(VAddrBits-1, log2Ceil(PredictWidth)+instOffsetBits+1)
    def getOffset(pc: UInt) = pc(log2Ceil(PredictWidth)+instOffsetBits, instOffsetBits)
    Cat(getHigher(Mux(isNextMask(offset) && startAddr(log2Ceil(PredictWidth)+instOffsetBits), nextLineAddr, startAddr)),
        getOffset(startAddr)+offset, 0.U(instOffsetBits.W))
  }
  def fromBranchPrediction(resp: BranchPredictionBundle) = {
    def carryPos(addr: UInt) = addr(instOffsetBits+log2Ceil(PredictWidth)+1)
    this.startAddr := resp.pc
    this.nextLineAddr := resp.pc + (FetchWidth * 4 * 2).U // may be broken on other configs
    this.isNextMask := VecInit((0 until PredictWidth).map(i =>
      (resp.pc(log2Ceil(PredictWidth), 1) +& i.U)(log2Ceil(PredictWidth)).asBool()
    ))
    this.fallThruError := resp.fallThruError
    this
  }
  override def toPrintable: Printable = {
    p"startAddr:${Hexadecimal(startAddr)}"
  }
}

class Ftq_pd_Entry(implicit p: Parameters) extends XSBundle {
  val brMask = Vec(PredictWidth, Bool())
  val jmpInfo = ValidUndirectioned(Vec(3, Bool()))
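  // Encoding note (inferred from fromPdWb below): when jmpInfo.valid, the single jump
  // recorded at jmpOffset is described by bits(0) = isJalr, bits(1) = isCall,
  // bits(2) = isRet; hasJal/hasJalr/hasCall/hasRet decode it accordingly.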
  val jmpOffset = UInt(log2Ceil(PredictWidth).W)
  val jalTarget = UInt(VAddrBits.W)
  val rvcMask = Vec(PredictWidth, Bool())
  def hasJal  = jmpInfo.valid && !jmpInfo.bits(0)
  def hasJalr = jmpInfo.valid && jmpInfo.bits(0)
  def hasCall = jmpInfo.valid && jmpInfo.bits(1)
  def hasRet  = jmpInfo.valid && jmpInfo.bits(2)

  def fromPdWb(pdWb: PredecodeWritebackBundle) = {
    val pds = pdWb.pd
    this.brMask := VecInit(pds.map(pd => pd.isBr && pd.valid))
    this.jmpInfo.valid := VecInit(pds.map(pd => (pd.isJal || pd.isJalr) && pd.valid)).asUInt.orR
    this.jmpInfo.bits := ParallelPriorityMux(pds.map(pd => (pd.isJal || pd.isJalr) && pd.valid),
                                             pds.map(pd => VecInit(pd.isJalr, pd.isCall, pd.isRet)))
    this.jmpOffset := ParallelPriorityEncoder(pds.map(pd => (pd.isJal || pd.isJalr) && pd.valid))
    this.rvcMask := VecInit(pds.map(pd => pd.isRVC))
    this.jalTarget := pdWb.jalTarget
  }

  def toPd(offset: UInt) = {
    require(offset.getWidth == log2Ceil(PredictWidth))
    val pd = Wire(new PreDecodeInfo)
    pd.valid := true.B
    pd.isRVC := rvcMask(offset)
    val isBr = brMask(offset)
    val isJalr = offset === jmpOffset && jmpInfo.valid && jmpInfo.bits(0)
    pd.brType := Cat(offset === jmpOffset && jmpInfo.valid, isJalr || isBr)
    pd.isCall := offset === jmpOffset && jmpInfo.valid && jmpInfo.bits(1)
    pd.isRet  := offset === jmpOffset && jmpInfo.valid && jmpInfo.bits(2)
    pd
  }
}



class Ftq_Redirect_SRAMEntry(implicit p: Parameters) extends XSBundle with HasBPUConst {
  val rasSp = UInt(log2Ceil(RasSize).W)
  val rasEntry = new RASEntry
  // val specCnt = Vec(numBr, UInt(10.W))
  // val ghist = new ShiftingGlobalHistory
  val folded_hist = new AllFoldedHistories(foldedGHistInfos)
  val afhob = new AllAheadFoldedHistoryOldestBits(foldedGHistInfos)
  val lastBrNumOH = UInt((numBr+1).W)

  val histPtr = new CGHPtr

  def fromBranchPrediction(resp: BranchPredictionBundle) = {
    assert(!resp.is_minimal)
    this.rasSp := resp.rasSp
    this.rasEntry := resp.rasTop
    this.folded_hist := resp.folded_hist
    this.afhob := resp.afhob
    this.lastBrNumOH := resp.lastBrNumOH
    this.histPtr := resp.histPtr
    this
  }
}

class Ftq_1R_SRAMEntry(implicit p: Parameters) extends XSBundle with HasBPUConst {
  val meta = UInt(MaxMetaLength.W)
}

class Ftq_Pred_Info(implicit p: Parameters) extends XSBundle {
  val target = UInt(VAddrBits.W)
  val cfiIndex = ValidUndirectioned(UInt(log2Ceil(PredictWidth).W))
}

// class FtqEntry(implicit p: Parameters) extends XSBundle with HasBPUConst {
//   val startAddr = UInt(VAddrBits.W)
//   val fallThruAddr = UInt(VAddrBits.W)
//   val isNextMask = Vec(PredictWidth, Bool())

//   val meta = UInt(MaxMetaLength.W)

//   val rasSp = UInt(log2Ceil(RasSize).W)
//   val rasEntry = new RASEntry
//   val hist = new ShiftingGlobalHistory
//   val specCnt = Vec(numBr, UInt(10.W))

//   val valids = Vec(PredictWidth, Bool())
//   val brMask = Vec(PredictWidth, Bool())
//   // isJalr, isCall, isRet
//   val jmpInfo = ValidUndirectioned(Vec(3, Bool()))
//   val jmpOffset = UInt(log2Ceil(PredictWidth).W)

//   val mispredVec = Vec(PredictWidth, Bool())
//   val cfiIndex = ValidUndirectioned(UInt(log2Ceil(PredictWidth).W))
//   val target = UInt(VAddrBits.W)
// }

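// FtqRead is a read port driven by the consumer: the consumer outputs ptr/offset and
// samples data, and apply() wires the request and returns the data field in one
// expression. A hypothetical usage sketch (names are illustrative only):
//   val jumpPc = io.ftqRead(jumpFtqPtr, jumpFtqOffset)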
class FtqRead[T <: Data](private val gen: T)(implicit p: Parameters) extends XSBundle {
  val ptr = Output(new FtqPtr)
  val offset = Output(UInt(log2Ceil(PredictWidth).W))
  val data = Input(gen)
  def apply(ptr: FtqPtr, offset: UInt) = {
    this.ptr := ptr
    this.offset := offset
    this.data
  }
}


class FtqToBpuIO(implicit p: Parameters) extends XSBundle {
  val redirect = Valid(new BranchPredictionRedirect)
  val update = Valid(new BranchPredictionUpdate)
  val enq_ptr = Output(new FtqPtr)
}

class FtqToIfuIO(implicit p: Parameters) extends XSBundle with HasCircularQueuePtrHelper {
  val req = Decoupled(new FetchRequestBundle)
  val redirect = Valid(new Redirect)
  val flushFromBpu = new Bundle {
    // when ifu pipeline is not stalled,
    // a packet from bpu s3 can reach f1 at most
    val s2 = Valid(new FtqPtr)
    val s3 = Valid(new FtqPtr)
    def shouldFlushBy(src: Valid[FtqPtr], idx_to_flush: FtqPtr) = {
      src.valid && !isAfter(src.bits, idx_to_flush)
    }
    def shouldFlushByStage2(idx: FtqPtr) = shouldFlushBy(s2, idx)
    def shouldFlushByStage3(idx: FtqPtr) = shouldFlushBy(s3, idx)
  }
}

trait HasBackendRedirectInfo extends HasXSParameter {
  def numRedirectPcRead = exuParameters.JmpCnt + exuParameters.AluCnt + 1
  def isLoadReplay(r: Valid[Redirect]) = r.bits.flushItself()
}

class FtqToCtrlIO(implicit p: Parameters) extends XSBundle with HasBackendRedirectInfo {
  val pc_reads = Vec(1 + numRedirectPcRead + 1 + 1, Flipped(new FtqRead(UInt(VAddrBits.W))))
  val target_read = Flipped(new FtqRead(UInt(VAddrBits.W)))
  val redirect_s1_real_pc = Output(UInt(VAddrBits.W))
  def getJumpPcRead = pc_reads.head
  def getRedirectPcRead = VecInit(pc_reads.tail.dropRight(2))
  def getRedirectPcReadData = pc_reads.tail.dropRight(2).map(_.data)
  def getMemPredPcRead = pc_reads.init.last
  def getRobFlushPcRead = pc_reads.last
}


class FTBEntryGen(implicit p: Parameters) extends XSModule with HasBackendRedirectInfo with HasBPUParameter {
  val io = IO(new Bundle {
    val start_addr = Input(UInt(VAddrBits.W))
    val old_entry = Input(new FTBEntry)
    val pd = Input(new Ftq_pd_Entry)
    val cfiIndex = Flipped(Valid(UInt(log2Ceil(PredictWidth).W)))
    val target = Input(UInt(VAddrBits.W))
    val hit = Input(Bool())
    val mispredict_vec = Input(Vec(PredictWidth, Bool()))

    val new_entry = Output(new FTBEntry)
    val new_br_insert_pos = Output(Vec(numBr, Bool()))
    val taken_mask = Output(Vec(numBr, Bool()))
    val mispred_mask = Output(Vec(numBr+1, Bool()))

    // for perf counters
    val is_init_entry = Output(Bool())
    val is_old_entry = Output(Bool())
    val is_new_br = Output(Bool())
    val is_jalr_target_modified = Output(Bool())
    val is_always_taken_modified = Output(Bool())
    val is_br_full = Output(Bool())
  })

  // no mispredictions detected at predecode
  val hit = io.hit
  val pd = io.pd

  val init_entry = WireInit(0.U.asTypeOf(new FTBEntry))


  val cfi_is_br = pd.brMask(io.cfiIndex.bits) && io.cfiIndex.valid
  val entry_has_jmp = pd.jmpInfo.valid
  val new_jmp_is_jal  = entry_has_jmp && !pd.jmpInfo.bits(0) && io.cfiIndex.valid
  val new_jmp_is_jalr = entry_has_jmp &&  pd.jmpInfo.bits(0) && io.cfiIndex.valid
  val new_jmp_is_call = entry_has_jmp &&  pd.jmpInfo.bits(1) && io.cfiIndex.valid
  val new_jmp_is_ret  = entry_has_jmp &&  pd.jmpInfo.bits(2) && io.cfiIndex.valid
  val last_jmp_rvi = entry_has_jmp && pd.jmpOffset === (PredictWidth-1).U && !pd.rvcMask.last
  // val last_br_rvi = cfi_is_br && io.cfiIndex.bits === (PredictWidth-1).U && !pd.rvcMask.last
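  // last_jmp_rvi appears to mark a non-RVC jump occupying the last 2-byte slot, i.e. an
  // instruction that spills into the next fetch block; pftAddr/carry below then fall back
  // to the start address plus one full fetch block instead of jmpOffset plus the jump length.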

  val cfi_is_jal  = io.cfiIndex.bits === pd.jmpOffset && new_jmp_is_jal
  val cfi_is_jalr = io.cfiIndex.bits === pd.jmpOffset && new_jmp_is_jalr

  def carryPos = log2Ceil(PredictWidth)+instOffsetBits
  def getLower(pc: UInt) = pc(carryPos-1, instOffsetBits)
  // if not hit, establish a new entry
  init_entry.valid := true.B
  // tag is left for ftb to assign

  // case br
  val init_br_slot = init_entry.getSlotForBr(0)
  when (cfi_is_br) {
    init_br_slot.valid := true.B
    init_br_slot.offset := io.cfiIndex.bits
    init_br_slot.setLowerStatByTarget(io.start_addr, io.target, numBr == 1)
    init_entry.always_taken(0) := true.B // set to always taken on init
  }

  // case jmp
  when (entry_has_jmp) {
    init_entry.tailSlot.offset := pd.jmpOffset
    init_entry.tailSlot.valid := new_jmp_is_jal || new_jmp_is_jalr
    init_entry.tailSlot.setLowerStatByTarget(io.start_addr, Mux(cfi_is_jalr, io.target, pd.jalTarget), isShare=false)
  }

  val jmpPft = getLower(io.start_addr) +& pd.jmpOffset +& Mux(pd.rvcMask(pd.jmpOffset), 1.U, 2.U)
  init_entry.pftAddr := Mux(entry_has_jmp && !last_jmp_rvi, jmpPft, getLower(io.start_addr))
  init_entry.carry   := Mux(entry_has_jmp && !last_jmp_rvi, jmpPft(carryPos-instOffsetBits), true.B)
  init_entry.isJalr := new_jmp_is_jalr
  init_entry.isCall := new_jmp_is_call
  init_entry.isRet  := new_jmp_is_ret
  // that means fall thru points to the middle of an inst
  init_entry.last_may_be_rvi_call := pd.jmpOffset === (PredictWidth-1).U && !pd.rvcMask(pd.jmpOffset)

  // if hit, check whether a new cfi (only br is possible) is detected
  val oe = io.old_entry
  val br_recorded_vec = oe.getBrRecordedVec(io.cfiIndex.bits)
  val br_recorded = br_recorded_vec.asUInt.orR
  val is_new_br = cfi_is_br && !br_recorded
  val new_br_offset = io.cfiIndex.bits
  // vec(i) means new br will be inserted BEFORE old br(i)
  val allBrSlotsVec = oe.allSlotsForBr
  val new_br_insert_onehot = VecInit((0 until numBr).map{
    i => i match {
      case 0 =>
        !allBrSlotsVec(0).valid || new_br_offset < allBrSlotsVec(0).offset
      case idx =>
        allBrSlotsVec(idx-1).valid && new_br_offset > allBrSlotsVec(idx-1).offset &&
        (!allBrSlotsVec(idx).valid || new_br_offset < allBrSlotsVec(idx).offset)
    }
  })

  val old_entry_modified = WireInit(io.old_entry)
  for (i <- 0 until numBr) {
    val slot = old_entry_modified.allSlotsForBr(i)
    when (new_br_insert_onehot(i)) {
      slot.valid := true.B
      slot.offset := new_br_offset
      slot.setLowerStatByTarget(io.start_addr, io.target, i == numBr-1)
      old_entry_modified.always_taken(i) := true.B
    }.elsewhen (new_br_offset > oe.allSlotsForBr(i).offset) {
      old_entry_modified.always_taken(i) := false.B
      // all other fields remain unchanged
    }.otherwise {
      // case i == 0, remain unchanged
      if (i != 0) {
        val noNeedToMoveFromFormerSlot = (i == numBr-1).B && !oe.brSlots.last.valid
        when (!noNeedToMoveFromFormerSlot) {
          slot.fromAnotherSlot(oe.allSlotsForBr(i-1))
          old_entry_modified.always_taken(i) := oe.always_taken(i)
        }
      }
    }
  }

  // two circumstances:
  // 1. oe: | br | j  |, new br should be in front of j, thus addr of j should be new pft
  // 2. oe: | br | br |, new br could be anywhere between, thus new pft is the addr of either
  //    the previous last br or the new br
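  // Worked example (assuming numBr == 2): with both br slots occupied, a newly detected br
  // either displaces the old last br (if its offset is smaller) or itself stays outside the
  // entry; either way pftAddr below is shortened to the displaced/unrecorded br's offset,
  // presumably so a later fetch starting there can record it in a fresh entry.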
  val may_have_to_replace = oe.noEmptySlotForNewBr
  val pft_need_to_change = is_new_br && may_have_to_replace
  // it should either be the given last br or the new br
  when (pft_need_to_change) {
    val new_pft_offset =
      Mux(!new_br_insert_onehot.asUInt.orR,
        new_br_offset, oe.allSlotsForBr.last.offset)

    // set jmp to invalid
    old_entry_modified.pftAddr := getLower(io.start_addr) + new_pft_offset
    old_entry_modified.carry := (getLower(io.start_addr) +& new_pft_offset).head(1).asBool
    old_entry_modified.last_may_be_rvi_call := false.B
    old_entry_modified.isCall := false.B
    old_entry_modified.isRet := false.B
    old_entry_modified.isJalr := false.B
  }

  val old_entry_jmp_target_modified = WireInit(oe)
  val old_target = oe.tailSlot.getTarget(io.start_addr) // may be wrong because we store only 20 lowest bits
  val old_tail_is_jmp = !oe.tailSlot.sharing
  val jalr_target_modified = cfi_is_jalr && (old_target =/= io.target) && old_tail_is_jmp // TODO: pass full jalr target
  when (jalr_target_modified) {
    old_entry_jmp_target_modified.setByJmpTarget(io.start_addr, io.target)
    old_entry_jmp_target_modified.always_taken := 0.U.asTypeOf(Vec(numBr, Bool()))
  }

  val old_entry_always_taken = WireInit(oe)
  val always_taken_modified_vec = Wire(Vec(numBr, Bool())) // whether modified or not
  for (i <- 0 until numBr) {
    old_entry_always_taken.always_taken(i) :=
      oe.always_taken(i) && io.cfiIndex.valid && oe.brValids(i) && io.cfiIndex.bits === oe.brOffset(i)
    always_taken_modified_vec(i) := oe.always_taken(i) && !old_entry_always_taken.always_taken(i)
  }
  val always_taken_modified = always_taken_modified_vec.reduce(_||_)



  val derived_from_old_entry =
    Mux(is_new_br, old_entry_modified,
      Mux(jalr_target_modified, old_entry_jmp_target_modified, old_entry_always_taken))


  io.new_entry := Mux(!hit, init_entry, derived_from_old_entry)

  io.new_br_insert_pos := new_br_insert_onehot
  io.taken_mask := VecInit((io.new_entry.brOffset zip io.new_entry.brValids).map{
    case (off, v) => io.cfiIndex.bits === off && io.cfiIndex.valid && v
  })
  for (i <- 0 until numBr) {
    io.mispred_mask(i) := io.new_entry.brValids(i) && io.mispredict_vec(io.new_entry.brOffset(i))
  }
  io.mispred_mask.last := io.new_entry.jmpValid && io.mispredict_vec(pd.jmpOffset)

  // for perf counters
  io.is_init_entry := !hit
  io.is_old_entry := hit && !is_new_br && !jalr_target_modified && !always_taken_modified
  io.is_new_br := hit && is_new_br
  io.is_jalr_target_modified := hit && jalr_target_modified
  io.is_always_taken_modified := hit && always_taken_modified
  io.is_br_full := hit && is_new_br && may_have_to_replace
}

class Ftq(implicit p: Parameters) extends XSModule with HasCircularQueuePtrHelper
  with HasBackendRedirectInfo with BPUUtils with HasBPUConst with HasPerfEvents
  with HasICacheParameters{
  val io = IO(new Bundle {
    val fromBpu = Flipped(new BpuToFtqIO)
    val fromIfu = Flipped(new IfuToFtqIO)
    val fromBackend = Flipped(new CtrlToFtqIO)

    val toBpu = new FtqToBpuIO
    val toIfu = new FtqToIfuIO
    val toBackend = new FtqToCtrlIO

    val toPrefetch = new FtqPrefechBundle

    val bpuInfo = new Bundle {
      val bpRight = Output(UInt(XLEN.W))
      val bpWrong = Output(UInt(XLEN.W))
    }
  })
  io.bpuInfo := DontCare
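  // Pointer roles, as used below: bpuPtr tracks enqueue from the BPU, ifuPtr tracks
  // requests sent to the IFU, ifuWbPtr tracks predecode writeback from the IFU, and
  // commPtr tracks commit from the backend; all are FtqPtr circular-queue pointers.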

  val backendRedirect = Wire(Valid(new Redirect))
  val backendRedirectReg = RegNext(backendRedirect)

  val stage2Flush = backendRedirect.valid
  val backendFlush = stage2Flush || RegNext(stage2Flush)
  val ifuFlush = Wire(Bool())

  val flush = stage2Flush || RegNext(stage2Flush)

  val allowBpuIn, allowToIfu = WireInit(false.B)
  val flushToIfu = !allowToIfu
  allowBpuIn := !ifuFlush && !backendRedirect.valid && !backendRedirectReg.valid
  allowToIfu := !ifuFlush && !backendRedirect.valid && !backendRedirectReg.valid

  val bpuPtr, ifuPtr, ifuWbPtr, commPtr = RegInit(FtqPtr(false.B, 0.U))
  val validEntries = distanceBetween(bpuPtr, commPtr)

  // **********************************************************************
  // **************************** enq from bpu ****************************
  // **********************************************************************
  val new_entry_ready = validEntries < FtqSize.U
  io.fromBpu.resp.ready := new_entry_ready

  val bpu_s2_resp = io.fromBpu.resp.bits.s2
  val bpu_s3_resp = io.fromBpu.resp.bits.s3
  val bpu_s2_redirect = bpu_s2_resp.valid && bpu_s2_resp.hasRedirect
  val bpu_s3_redirect = bpu_s3_resp.valid && bpu_s3_resp.hasRedirect

  io.toBpu.enq_ptr := bpuPtr
  val enq_fire = io.fromBpu.resp.fire() && allowBpuIn // from bpu s1
  val bpu_in_fire = (io.fromBpu.resp.fire() || bpu_s2_redirect || bpu_s3_redirect) && allowBpuIn

  val bpu_in_resp = io.fromBpu.resp.bits.selectedResp
  val bpu_in_stage = io.fromBpu.resp.bits.selectedRespIdx
  val bpu_in_resp_ptr = Mux(bpu_in_stage === BP_S1, bpuPtr, bpu_in_resp.ftq_idx)
  val bpu_in_resp_idx = bpu_in_resp_ptr.value

  // read ports: jumpPc + redirects + loadPred + robFlush + ifuReq1 + ifuReq2 + commitUpdate
  val ftq_pc_mem = Module(new SyncDataModuleTemplate(new Ftq_RF_Components, FtqSize, 1+numRedirectPcRead+2+1+1+1, 1))
  // resp from uBTB
  ftq_pc_mem.io.wen(0) := bpu_in_fire
  ftq_pc_mem.io.waddr(0) := bpu_in_resp_idx
  ftq_pc_mem.io.wdata(0).fromBranchPrediction(bpu_in_resp)

  // ifuRedirect + backendRedirect + commit
  val ftq_redirect_sram = Module(new FtqNRSRAM(new Ftq_Redirect_SRAMEntry, 1+1+1))
  // this info is intended to be enqueued at the last stage of bpu
  ftq_redirect_sram.io.wen := io.fromBpu.resp.bits.lastStage.valid
  ftq_redirect_sram.io.waddr := io.fromBpu.resp.bits.lastStage.ftq_idx.value
  ftq_redirect_sram.io.wdata.fromBranchPrediction(io.fromBpu.resp.bits.lastStage)
  println(f"ftq redirect SRAM: entry ${ftq_redirect_sram.io.wdata.getWidth} * ${FtqSize} * 3")
  println(f"ftq redirect SRAM: ahead fh ${ftq_redirect_sram.io.wdata.afhob.getWidth} * ${FtqSize} * 3")

  val ftq_meta_1r_sram = Module(new FtqNRSRAM(new Ftq_1R_SRAMEntry, 1))
  // this info is intended to be enqueued at the last stage of bpu
  ftq_meta_1r_sram.io.wen := io.fromBpu.resp.bits.lastStage.valid
  ftq_meta_1r_sram.io.waddr := io.fromBpu.resp.bits.lastStage.ftq_idx.value
  ftq_meta_1r_sram.io.wdata.meta := io.fromBpu.resp.bits.meta
  // ifuRedirect + backendRedirect + commit
  val ftb_entry_mem = Module(new SyncDataModuleTemplate(new FTBEntry, FtqSize, 1+1+1, 1))
  ftb_entry_mem.io.wen(0) := io.fromBpu.resp.bits.lastStage.valid
  ftb_entry_mem.io.waddr(0) := io.fromBpu.resp.bits.lastStage.ftq_idx.value
  ftb_entry_mem.io.wdata(0) := io.fromBpu.resp.bits.lastStage.ftb_entry

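  // Storage summary: ftq_pc_mem holds per-entry PC components, ftq_redirect_sram holds
  // speculative history/RAS state for redirect recovery, ftq_meta_1r_sram holds predictor
  // meta read back at commit, and ftb_entry_mem holds the FTB entry used for training.
  // The registers below are written from multiple sources (enqueue, redirects, writeback).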
  // multi-write
  val update_target = Reg(Vec(FtqSize, UInt(VAddrBits.W))) // could be taken target or fallThrough
  val cfiIndex_vec = Reg(Vec(FtqSize, ValidUndirectioned(UInt(log2Ceil(PredictWidth).W))))
  val mispredict_vec = Reg(Vec(FtqSize, Vec(PredictWidth, Bool())))
  val pred_stage = Reg(Vec(FtqSize, UInt(2.W)))

  val c_invalid :: c_valid :: c_commited :: Nil = Enum(3)
  val commitStateQueue = RegInit(VecInit(Seq.fill(FtqSize) {
    VecInit(Seq.fill(PredictWidth)(c_invalid))
  }))

  val f_to_send :: f_sent :: Nil = Enum(2)
  val entry_fetch_status = RegInit(VecInit(Seq.fill(FtqSize)(f_sent)))

  val h_not_hit :: h_false_hit :: h_hit :: Nil = Enum(3)
  val entry_hit_status = RegInit(VecInit(Seq.fill(FtqSize)(h_not_hit)))

  // modify registers one cycle later to cut critical path
  val last_cycle_bpu_in = RegNext(bpu_in_fire)
  val last_cycle_bpu_in_idx = RegNext(bpu_in_resp_idx)
  val last_cycle_update_target = RegNext(bpu_in_resp.getTarget)
  val last_cycle_cfiIndex = RegNext(bpu_in_resp.cfiIndex)
  val last_cycle_bpu_in_stage = RegNext(bpu_in_stage)
  when (last_cycle_bpu_in) {
    entry_fetch_status(last_cycle_bpu_in_idx) := f_to_send
    commitStateQueue(last_cycle_bpu_in_idx) := VecInit(Seq.fill(PredictWidth)(c_invalid))
    cfiIndex_vec(last_cycle_bpu_in_idx) := last_cycle_cfiIndex
    mispredict_vec(last_cycle_bpu_in_idx) := WireInit(VecInit(Seq.fill(PredictWidth)(false.B)))
    update_target(last_cycle_bpu_in_idx) := last_cycle_update_target
    pred_stage(last_cycle_bpu_in_idx) := last_cycle_bpu_in_stage
  }


  bpuPtr := bpuPtr + enq_fire
  ifuPtr := ifuPtr + (io.toIfu.req.fire && allowToIfu)

  // only use ftb result to assign hit status
  when (bpu_s2_resp.valid) {
    entry_hit_status(bpu_s2_resp.ftq_idx.value) := Mux(bpu_s2_resp.full_pred.hit, h_hit, h_not_hit)
  }


  io.toIfu.flushFromBpu.s2.valid := bpu_s2_redirect
  io.toIfu.flushFromBpu.s2.bits := bpu_s2_resp.ftq_idx
  when (bpu_s2_resp.valid && bpu_s2_resp.hasRedirect) {
    bpuPtr := bpu_s2_resp.ftq_idx + 1.U
    // only when ifuPtr runs ahead of bpu s2 resp should we recover it
    when (!isBefore(ifuPtr, bpu_s2_resp.ftq_idx)) {
      ifuPtr := bpu_s2_resp.ftq_idx
    }
  }

  io.toIfu.flushFromBpu.s3.valid := bpu_s3_redirect
  io.toIfu.flushFromBpu.s3.bits := bpu_s3_resp.ftq_idx
  when (bpu_s3_resp.valid && bpu_s3_resp.hasRedirect) {
    bpuPtr := bpu_s3_resp.ftq_idx + 1.U
    // only when ifuPtr runs ahead of bpu s3 resp should we recover it
    when (!isBefore(ifuPtr, bpu_s3_resp.ftq_idx)) {
      ifuPtr := bpu_s3_resp.ftq_idx
    }
  }

  XSError(isBefore(bpuPtr, ifuPtr) && !isFull(bpuPtr, ifuPtr), "\nifuPtr is before bpuPtr!\n")

  // ****************************************************************
  // **************************** to ifu ****************************
  // ****************************************************************
  val bpu_in_bypass_buf = RegEnable(ftq_pc_mem.io.wdata(0), bpu_in_fire)
  val bpu_in_bypass_ptr = RegNext(bpu_in_resp_ptr)
  val last_cycle_to_ifu_fire = RegNext(io.toIfu.req.fire)

  // read pc and target
  ftq_pc_mem.io.raddr.init.init.last := ifuPtr.value
  ftq_pc_mem.io.raddr.init.last := (ifuPtr+1.U).value

  io.toIfu.req.bits.ftqIdx := ifuPtr


  val toIfuPcBundle = Wire(new Ftq_RF_Components)
  val entry_is_to_send = WireInit(entry_fetch_status(ifuPtr.value) === f_to_send)
  val entry_next_addr = WireInit(update_target(ifuPtr.value))
  val entry_ftq_offset = WireInit(cfiIndex_vec(ifuPtr.value))
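  // The request sent to the IFU is selected from three sources (see the when block below):
  // (1) a bypass of the prediction written this cycle when it targets ifuPtr,
  // (2) the pre-read ftq_pc_mem port for ifuPtr+1 if a request fired last cycle,
  // (3) the pre-read port for ifuPtr otherwise.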


  when (last_cycle_bpu_in && bpu_in_bypass_ptr === ifuPtr) {
    toIfuPcBundle := bpu_in_bypass_buf
    entry_is_to_send := true.B
    entry_next_addr := last_cycle_update_target
    entry_ftq_offset := last_cycle_cfiIndex
  }.elsewhen (last_cycle_to_ifu_fire) {
    toIfuPcBundle := ftq_pc_mem.io.rdata.init.last
    entry_is_to_send := RegNext(entry_fetch_status((ifuPtr+1.U).value) === f_to_send) ||
                        RegNext(last_cycle_bpu_in && bpu_in_bypass_ptr === (ifuPtr+1.U)) // reduce potential bubbles
  }.otherwise {
    toIfuPcBundle := ftq_pc_mem.io.rdata.init.init.last
    entry_is_to_send := RegNext(entry_fetch_status(ifuPtr.value) === f_to_send)
  }

  io.toIfu.req.valid := entry_is_to_send && ifuPtr =/= bpuPtr
  io.toIfu.req.bits.nextStartAddr := entry_next_addr
  io.toIfu.req.bits.ftqOffset := entry_ftq_offset
  io.toIfu.req.bits.fromFtqPcBundle(toIfuPcBundle)

  // when fall through is smaller in value than start address, there must be a false hit
  when (toIfuPcBundle.fallThruError && entry_hit_status(ifuPtr.value) === h_hit) {
    when (io.toIfu.req.fire &&
      !(bpu_s2_redirect && bpu_s2_resp.ftq_idx === ifuPtr) &&
      !(bpu_s3_redirect && bpu_s3_resp.ftq_idx === ifuPtr)
    ) {
      entry_hit_status(ifuPtr.value) := h_false_hit
      // XSError(true.B, "FTB false hit by fallThroughError, startAddr: %x, fallTHru: %x\n", io.toIfu.req.bits.startAddr, io.toIfu.req.bits.nextStartAddr)
    }
    XSDebug(true.B, "fallThruError! start:%x, fallThru:%x\n", io.toIfu.req.bits.startAddr, io.toIfu.req.bits.nextStartAddr)
  }

  XSPerfAccumulate(f"fall_through_error_to_ifu", toIfuPcBundle.fallThruError && entry_hit_status(ifuPtr.value) === h_hit &&
    io.toIfu.req.fire && !(bpu_s2_redirect && bpu_s2_resp.ftq_idx === ifuPtr) && !(bpu_s3_redirect && bpu_s3_resp.ftq_idx === ifuPtr))

  val ifu_req_should_be_flushed =
    io.toIfu.flushFromBpu.shouldFlushByStage2(io.toIfu.req.bits.ftqIdx) ||
    io.toIfu.flushFromBpu.shouldFlushByStage3(io.toIfu.req.bits.ftqIdx)

  when (io.toIfu.req.fire && !ifu_req_should_be_flushed) {
    entry_fetch_status(ifuPtr.value) := f_sent
  }

  // *********************************************************************
  // **************************** wb from ifu ****************************
  // *********************************************************************
  val pdWb = io.fromIfu.pdWb
  val pds = pdWb.bits.pd
  val ifu_wb_valid = pdWb.valid
  val ifu_wb_idx = pdWb.bits.ftqIdx.value
  // read ports: commit update
  val ftq_pd_mem = Module(new SyncDataModuleTemplate(new Ftq_pd_Entry, FtqSize, 1, 1))
  ftq_pd_mem.io.wen(0) := ifu_wb_valid
  ftq_pd_mem.io.waddr(0) := pdWb.bits.ftqIdx.value
  ftq_pd_mem.io.wdata(0).fromPdWb(pdWb.bits)

  val hit_pd_valid = entry_hit_status(ifu_wb_idx) === h_hit && ifu_wb_valid
  val hit_pd_mispred = hit_pd_valid && pdWb.bits.misOffset.valid
  val hit_pd_mispred_reg = RegNext(hit_pd_mispred, init=false.B)
  val pd_reg       = RegEnable(pds,             pdWb.valid)
  val start_pc_reg = RegEnable(pdWb.bits.pc(0), pdWb.valid)
  val wb_idx_reg   = RegEnable(ifu_wb_idx,      pdWb.valid)

  when (ifu_wb_valid) {
    val comm_stq_wen = VecInit(pds.map(_.valid).zip(pdWb.bits.instrRange).map{
      case (v, inRange) => v && inRange
    })
    (commitStateQueue(ifu_wb_idx) zip comm_stq_wen).map{
      case (qe, v) => when (v) { qe := c_valid }
    }
  }

  ifuWbPtr := ifuWbPtr + ifu_wb_valid

  ftb_entry_mem.io.raddr.head := ifu_wb_idx
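  // False-hit check: if this entry was marked h_hit by the FTB but predecode shows that a
  // recorded br/jmp slot is not actually an instruction of the recorded kind, the entry is
  // downgraded to h_false_hit so commit-time training sees it as a false hit.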
  val has_false_hit = WireInit(false.B)
  when (RegNext(hit_pd_valid)) {
    // check for false hit
    val pred_ftb_entry = ftb_entry_mem.io.rdata.head
    val brSlots = pred_ftb_entry.brSlots
    val tailSlot = pred_ftb_entry.tailSlot
    // we check cfis that bpu predicted

    // bpu predicted branches but denied by predecode
    val br_false_hit =
      brSlots.map{
        s => s.valid && !(pd_reg(s.offset).valid && pd_reg(s.offset).isBr)
      }.reduce(_||_) ||
      (tailSlot.valid && pred_ftb_entry.tailSlot.sharing &&
        !(pd_reg(tailSlot.offset).valid && pd_reg(tailSlot.offset).isBr))

    val jmpOffset = tailSlot.offset
    val jmp_pd = pd_reg(jmpOffset)
    val jal_false_hit = pred_ftb_entry.jmpValid &&
      ((pred_ftb_entry.isJal  && !(jmp_pd.valid && jmp_pd.isJal)) ||
       (pred_ftb_entry.isJalr && !(jmp_pd.valid && jmp_pd.isJalr)) ||
       (pred_ftb_entry.isCall && !(jmp_pd.valid && jmp_pd.isCall)) ||
       (pred_ftb_entry.isRet  && !(jmp_pd.valid && jmp_pd.isRet))
      )

    has_false_hit := br_false_hit || jal_false_hit || hit_pd_mispred_reg
    XSDebug(has_false_hit, "FTB false hit by br or jal or hit_pd, startAddr: %x\n", pdWb.bits.pc(0))

    // assert(!has_false_hit)
  }

  when (has_false_hit) {
    entry_hit_status(wb_idx_reg) := h_false_hit
  }


  // **********************************************************************
  // **************************** backend read ****************************
  // **********************************************************************

  // pc reads
  for ((req, i) <- io.toBackend.pc_reads.zipWithIndex) {
    ftq_pc_mem.io.raddr(i) := req.ptr.value
    req.data := ftq_pc_mem.io.rdata(i).getPc(RegNext(req.offset))
  }
  // target read
  io.toBackend.target_read.data := RegNext(update_target(io.toBackend.target_read.ptr.value))

  // *******************************************************************************
  // **************************** redirect from backend ****************************
  // *******************************************************************************

  // redirect read cfiInfo, couples to redirectGen s2
  ftq_redirect_sram.io.ren.init.last := backendRedirect.valid
  ftq_redirect_sram.io.raddr.init.last := backendRedirect.bits.ftqIdx.value

  ftb_entry_mem.io.raddr.init.last := backendRedirect.bits.ftqIdx.value

  val stage3CfiInfo = ftq_redirect_sram.io.rdata.init.last
  val fromBackendRedirect = WireInit(backendRedirectReg)
  val backendRedirectCfi = fromBackendRedirect.bits.cfiUpdate
  backendRedirectCfi.fromFtqRedirectSram(stage3CfiInfo)

  val r_ftb_entry = ftb_entry_mem.io.rdata.init.last
  val r_ftqOffset = fromBackendRedirect.bits.ftqOffset

  when (entry_hit_status(fromBackendRedirect.bits.ftqIdx.value) === h_hit) {
    backendRedirectCfi.shift := PopCount(r_ftb_entry.getBrMaskByOffset(r_ftqOffset)) +&
      (backendRedirectCfi.pd.isBr && !r_ftb_entry.brIsSaved(r_ftqOffset) &&
       !r_ftb_entry.newBrCanNotInsert(r_ftqOffset))

    backendRedirectCfi.addIntoHist := backendRedirectCfi.pd.isBr && (r_ftb_entry.brIsSaved(r_ftqOffset) ||
      !r_ftb_entry.newBrCanNotInsert(r_ftqOffset))
  }.otherwise {
    backendRedirectCfi.shift := (backendRedirectCfi.pd.isBr && backendRedirectCfi.taken).asUInt
    backendRedirectCfi.addIntoHist := backendRedirectCfi.pd.isBr.asUInt
  }


  // ***************************************************************************
  // **************************** redirect from ifu ****************************
  // ***************************************************************************
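  // A predecode (IFU) redirect is raised when predecode finds a mispredicted offset in the
  // fetched block and the backend is not already flushing; it flushes after the faulting
  // instruction (RedirectLevel.flushAfter).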
  val fromIfuRedirect = WireInit(0.U.asTypeOf(Valid(new Redirect)))
  fromIfuRedirect.valid := pdWb.valid && pdWb.bits.misOffset.valid && !backendFlush
  fromIfuRedirect.bits.ftqIdx := pdWb.bits.ftqIdx
  fromIfuRedirect.bits.ftqOffset := pdWb.bits.misOffset.bits
  fromIfuRedirect.bits.level := RedirectLevel.flushAfter

  val ifuRedirectCfiUpdate = fromIfuRedirect.bits.cfiUpdate
  ifuRedirectCfiUpdate.pc := pdWb.bits.pc(pdWb.bits.misOffset.bits)
  ifuRedirectCfiUpdate.pd := pdWb.bits.pd(pdWb.bits.misOffset.bits)
  ifuRedirectCfiUpdate.predTaken := cfiIndex_vec(pdWb.bits.ftqIdx.value).valid
  ifuRedirectCfiUpdate.target := pdWb.bits.target
  ifuRedirectCfiUpdate.taken := pdWb.bits.cfiOffset.valid
  ifuRedirectCfiUpdate.isMisPred := pdWb.bits.misOffset.valid

  val ifuRedirectReg = RegNext(fromIfuRedirect, init=0.U.asTypeOf(Valid(new Redirect)))
  val ifuRedirectToBpu = WireInit(ifuRedirectReg)
  ifuFlush := fromIfuRedirect.valid || ifuRedirectToBpu.valid

  ftq_redirect_sram.io.ren.head := fromIfuRedirect.valid
  ftq_redirect_sram.io.raddr.head := fromIfuRedirect.bits.ftqIdx.value

  ftb_entry_mem.io.raddr.head := fromIfuRedirect.bits.ftqIdx.value

  val toBpuCfi = ifuRedirectToBpu.bits.cfiUpdate
  toBpuCfi.fromFtqRedirectSram(ftq_redirect_sram.io.rdata.head)
  when (ifuRedirectReg.bits.cfiUpdate.pd.isRet) {
    toBpuCfi.target := toBpuCfi.rasEntry.retAddr
  }

  // *********************************************************************
  // **************************** wb from exu ****************************
  // *********************************************************************

  class RedirectGen(implicit p: Parameters) extends XSModule
    with HasCircularQueuePtrHelper {
    val io = IO(new Bundle {
      val in = Flipped((new CtrlToFtqIO).for_redirect_gen)
      val stage1Pc = Input(Vec(numRedirectPcRead, UInt(VAddrBits.W)))
      val out = Valid(new Redirect)
      val s1_real_pc = Output(UInt(VAddrBits.W))
      val debug_diff = Flipped(Valid(new Redirect))
    })
    val s1_jumpTarget = io.in.s1_jumpTarget
    val s1_uop = io.in.s1_oldest_exu_output.bits.uop
    val s1_imm12_reg = s1_uop.ctrl.imm(11,0)
    val s1_pd = s1_uop.cf.pd
    val s1_isReplay = io.in.s1_redirect_onehot.last
    val s1_isJump = io.in.s1_redirect_onehot.head
    val real_pc = Mux1H(io.in.s1_redirect_onehot, io.stage1Pc)
    val brTarget = real_pc + SignExt(ImmUnion.B.toImm32(s1_imm12_reg), XLEN)
    val snpc = real_pc + Mux(s1_pd.isRVC, 2.U, 4.U)
    val target = Mux(s1_isReplay,
      real_pc,
      Mux(io.in.s1_oldest_redirect.bits.cfiUpdate.taken,
        Mux(s1_isJump, io.in.s1_jumpTarget, brTarget),
        snpc
      )
    )

    val redirectGenRes = WireInit(io.in.rawRedirect)
    redirectGenRes.bits.cfiUpdate.pc := real_pc
    redirectGenRes.bits.cfiUpdate.pd := s1_pd
    redirectGenRes.bits.cfiUpdate.target := target

    val realRedirect = Wire(Valid(new Redirect))
    realRedirect.valid := redirectGenRes.valid || io.in.flushRedirect.valid
    realRedirect.bits := Mux(io.in.flushRedirect.valid, io.in.flushRedirect.bits, redirectGenRes.bits)

    when (io.in.flushRedirect.valid) {
      realRedirect.bits.level := RedirectLevel.flush
      realRedirect.bits.cfiUpdate.target := io.in.frontendFlushTarget
    }

    io.out := realRedirect
    io.s1_real_pc := real_pc
    XSError((io.debug_diff.valid || realRedirect.valid) && io.debug_diff.asUInt =/= io.out.asUInt, "redirect wrong")

  }
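  // RedirectGen above recomputes the redirect one stage early from the oldest redirecting
  // uop: a replay keeps the same pc, a taken jump uses the jump target, a taken branch uses
  // pc + B-type immediate, and a not-taken cfi uses the next sequential pc; a frontend
  // flush overrides both level and target.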

  val redirectGen = Module(new RedirectGen)
  redirectGen.io.in <> io.fromBackend.for_redirect_gen
  redirectGen.io.stage1Pc := io.toBackend.getRedirectPcReadData
  redirectGen.io.debug_diff := io.fromBackend.redirect
  backendRedirect := redirectGen.io.out

  io.toBackend.redirect_s1_real_pc := redirectGen.io.s1_real_pc

  def extractRedirectInfo(wb: Valid[Redirect]) = {
    val ftqIdx = wb.bits.ftqIdx.value
    val ftqOffset = wb.bits.ftqOffset
    val taken = wb.bits.cfiUpdate.taken
    val mispred = wb.bits.cfiUpdate.isMisPred
    (wb.valid, ftqIdx, ftqOffset, taken, mispred)
  }

  // fix mispredict entry
  val lastIsMispredict = RegNext(
    backendRedirect.valid && backendRedirect.bits.level === RedirectLevel.flushAfter, init = false.B
  )

  def updateCfiInfo(redirect: Valid[Redirect], isBackend: Boolean = true) = {
    val (r_valid, r_idx, r_offset, r_taken, r_mispred) = extractRedirectInfo(redirect)
    val cfiIndex_bits_wen = r_valid && r_taken && r_offset < cfiIndex_vec(r_idx).bits
    val cfiIndex_valid_wen = r_valid && r_offset === cfiIndex_vec(r_idx).bits
    when (cfiIndex_bits_wen || cfiIndex_valid_wen) {
      cfiIndex_vec(r_idx).valid := cfiIndex_bits_wen || cfiIndex_valid_wen && r_taken
    }
    when (cfiIndex_bits_wen) {
      cfiIndex_vec(r_idx).bits := r_offset
    }
    update_target(r_idx) := redirect.bits.cfiUpdate.target
    if (isBackend) {
      mispredict_vec(r_idx)(r_offset) := r_mispred
    }
  }

  when(backendRedirectReg.valid && lastIsMispredict) {
    updateCfiInfo(backendRedirectReg)
  }.elsewhen (ifuRedirectToBpu.valid) {
    updateCfiInfo(ifuRedirectToBpu, isBackend=false)
  }

  // ***********************************************************************************
  // **************************** flush ptr and state queue ****************************
  // ***********************************************************************************

  val redirectVec = VecInit(backendRedirect, fromIfuRedirect)

  // when redirect, we should reset ptrs and status queues
  when(redirectVec.map(r => r.valid).reduce(_||_)){
    val r = PriorityMux(redirectVec.map(r => (r.valid -> r.bits)))
    val notIfu = redirectVec.dropRight(1).map(r => r.valid).reduce(_||_)
    val (idx, offset, flushItSelf) = (r.ftqIdx, r.ftqOffset, RedirectLevel.flushItself(r.level))
    val next = idx + 1.U
    bpuPtr := next
    ifuPtr := next
    ifuWbPtr := next
    when (notIfu) {
      commitStateQueue(idx.value).zipWithIndex.foreach({ case (s, i) =>
        when(i.U > offset || i.U === offset && flushItSelf){
          s := c_invalid
        }
      })
    }
  }

  // only the valid bit is actually needed
  io.toIfu.redirect.bits := backendRedirect.bits
  io.toIfu.redirect.valid := stage2Flush

  // commit
  for (c <- io.fromBackend.rob_commits) {
    when(c.valid) {
      commitStateQueue(c.bits.ftqIdx.value)(c.bits.ftqOffset) := c_commited
      // TODO: remove this
      // For instruction fusions, we also update the next instruction
      when (c.bits.commitType === 4.U) {
        commitStateQueue(c.bits.ftqIdx.value)(c.bits.ftqOffset + 1.U) := c_commited
      }.elsewhen(c.bits.commitType === 5.U) {
        commitStateQueue(c.bits.ftqIdx.value)(c.bits.ftqOffset + 2.U) := c_commited
      }.elsewhen(c.bits.commitType === 6.U) {
        val index = (c.bits.ftqIdx + 1.U).value
        commitStateQueue(index)(0) := c_commited
      }.elsewhen(c.bits.commitType === 7.U) {
        val index = (c.bits.ftqIdx + 1.U).value
        commitStateQueue(index)(1) := c_commited
      }
    }
  }

  // ****************************************************************
  // **************************** to bpu ****************************
  // ****************************************************************

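  // Training the BPU: redirects are forwarded with backend priority over IFU redirects, and
  // an update packet is generated once the entry at commPtr can commit, i.e. the IFU has
  // written it back, no BPU-side stall is pending, and every slot is committed or invalid.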
  io.toBpu.redirect <> Mux(fromBackendRedirect.valid, fromBackendRedirect, ifuRedirectToBpu)

  val may_have_stall_from_bpu = RegInit(false.B)
  val canCommit = commPtr =/= ifuWbPtr && !may_have_stall_from_bpu &&
    Cat(commitStateQueue(commPtr.value).map(s => {
      s === c_invalid || s === c_commited
    })).andR()

  // commit reads
  ftq_pc_mem.io.raddr.last := commPtr.value
  val commit_pc_bundle = ftq_pc_mem.io.rdata.last
  ftq_pd_mem.io.raddr.last := commPtr.value
  val commit_pd = ftq_pd_mem.io.rdata.last
  ftq_redirect_sram.io.ren.last := canCommit
  ftq_redirect_sram.io.raddr.last := commPtr.value
  val commit_spec_meta = ftq_redirect_sram.io.rdata.last
  ftq_meta_1r_sram.io.ren(0) := canCommit
  ftq_meta_1r_sram.io.raddr(0) := commPtr.value
  val commit_meta = ftq_meta_1r_sram.io.rdata(0)
  ftb_entry_mem.io.raddr.last := commPtr.value
  val commit_ftb_entry = ftb_entry_mem.io.rdata.last

  // need one cycle to read mem and srams
  val do_commit_ptr = RegNext(commPtr)
  val do_commit = RegNext(canCommit, init=false.B)
  when (canCommit) { commPtr := commPtr + 1.U }
  val commit_state = RegNext(commitStateQueue(commPtr.value))
  val can_commit_cfi = WireInit(cfiIndex_vec(commPtr.value))
  when (commitStateQueue(commPtr.value)(can_commit_cfi.bits) =/= c_commited) {
    can_commit_cfi.valid := false.B
  }
  val commit_cfi = RegNext(can_commit_cfi)

  val commit_mispredict = VecInit((RegNext(mispredict_vec(commPtr.value)) zip commit_state).map {
    case (mis, state) => mis && state === c_commited
  })
  val can_commit_hit = entry_hit_status(commPtr.value)
  val commit_hit = RegNext(can_commit_hit)
  val commit_target = RegNext(update_target(commPtr.value))
  val commit_stage = RegNext(pred_stage(commPtr.value))
  val commit_valid = commit_hit === h_hit || commit_cfi.valid // hit or taken

  val to_bpu_hit = can_commit_hit === h_hit || can_commit_hit === h_false_hit
  may_have_stall_from_bpu := can_commit_cfi.valid && !to_bpu_hit && !may_have_stall_from_bpu

  io.toBpu.update := DontCare
  io.toBpu.update.valid := commit_valid && do_commit
  val update = io.toBpu.update.bits
  update.false_hit := commit_hit === h_false_hit
  update.pc := commit_pc_bundle.startAddr
  update.meta := commit_meta.meta
  update.full_target := commit_target
  update.from_stage := commit_stage
  update.fromFtqRedirectSram(commit_spec_meta)

  val commit_real_hit = commit_hit === h_hit
  val update_ftb_entry = update.ftb_entry

  val ftbEntryGen = Module(new FTBEntryGen).io
  ftbEntryGen.start_addr := commit_pc_bundle.startAddr
  ftbEntryGen.old_entry := commit_ftb_entry
  ftbEntryGen.pd := commit_pd
  ftbEntryGen.cfiIndex := commit_cfi
  ftbEntryGen.target := commit_target
  ftbEntryGen.hit := commit_real_hit
  ftbEntryGen.mispredict_vec := commit_mispredict

  update_ftb_entry := ftbEntryGen.new_entry
  update.new_br_insert_pos := ftbEntryGen.new_br_insert_pos
  update.mispred_mask := ftbEntryGen.mispred_mask
  update.old_entry := ftbEntryGen.is_old_entry
  update.pred_hit := commit_hit === h_hit || commit_hit === h_false_hit

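  // The committed full_pred is rebuilt from the regenerated FTB entry (ftbEntryGen.new_entry)
  // rather than from the original prediction, presumably so predictors are trained against
  // the entry that will be written back to the FTB.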
  update.is_minimal := false.B
  update.full_pred.fromFtbEntry(ftbEntryGen.new_entry, update.pc)
  update.full_pred.br_taken_mask := ftbEntryGen.taken_mask
  update.full_pred.jalr_target := commit_target
  update.full_pred.hit := true.B
  when (update.full_pred.is_jalr) {
    update.full_pred.targets.last := commit_target
  }

  // ****************************************************************
  // *********************** to prefetch ****************************
  // ****************************************************************

  if(cacheParams.hasPrefetch){
    val prefetchPtr = RegInit(FtqPtr(false.B, 0.U))
    prefetchPtr := prefetchPtr + io.toPrefetch.req.fire()

    when (bpu_s2_resp.valid && bpu_s2_resp.hasRedirect && !isBefore(prefetchPtr, bpu_s2_resp.ftq_idx)) {
      prefetchPtr := bpu_s2_resp.ftq_idx
    }

    when (bpu_s3_resp.valid && bpu_s3_resp.hasRedirect && !isBefore(prefetchPtr, bpu_s3_resp.ftq_idx)) {
      prefetchPtr := bpu_s3_resp.ftq_idx
      // XSError(true.B, "\ns3_redirect mechanism not implemented!\n")
    }


    val prefetch_is_to_send = WireInit(entry_fetch_status(prefetchPtr.value) === f_to_send)
    val prefetch_addr = WireInit(update_target(prefetchPtr.value))

    when (last_cycle_bpu_in && bpu_in_bypass_ptr === prefetchPtr) {
      prefetch_is_to_send := true.B
      prefetch_addr := last_cycle_update_target
    }
    io.toPrefetch.req.valid := prefetchPtr =/= bpuPtr && prefetch_is_to_send
    io.toPrefetch.req.bits.target := prefetch_addr

    when(redirectVec.map(r => r.valid).reduce(_||_)){
      val r = PriorityMux(redirectVec.map(r => (r.valid -> r.bits)))
      val next = r.ftqIdx + 1.U
      prefetchPtr := next
    }

    XSError(isBefore(bpuPtr, prefetchPtr) && !isFull(bpuPtr, prefetchPtr), "\nprefetchPtr is before bpuPtr!\n")
    XSError(isBefore(prefetchPtr, ifuPtr) && !isFull(ifuPtr, prefetchPtr), "\nifuPtr is before prefetchPtr!\n")
  }
  else {
    io.toPrefetch.req <> DontCare
  }

  // ******************************************************************************
  // **************************** commit perf counters ****************************
  // ******************************************************************************

  val commit_inst_mask    = VecInit(commit_state.map(c => c === c_commited && do_commit)).asUInt
  val commit_mispred_mask = commit_mispredict.asUInt
  val commit_not_mispred_mask = ~commit_mispred_mask

  val commit_br_mask = commit_pd.brMask.asUInt
  val commit_jmp_mask = UIntToOH(commit_pd.jmpOffset) & Fill(PredictWidth, commit_pd.jmpInfo.valid.asTypeOf(UInt(1.W)))
  val commit_cfi_mask = (commit_br_mask | commit_jmp_mask)

  val mbpInstrs = commit_inst_mask & commit_cfi_mask

  val mbpRights = mbpInstrs & commit_not_mispred_mask
  val mbpWrongs = mbpInstrs & commit_mispred_mask

  io.bpuInfo.bpRight := PopCount(mbpRights)
  io.bpuInfo.bpWrong := PopCount(mbpWrongs)

  // Cfi Info
  for (i <- 0 until PredictWidth) {
    val pc = commit_pc_bundle.startAddr + (i * instBytes).U
    val v = commit_state(i) === c_commited
    val isBr = commit_pd.brMask(i)
    val isJmp = commit_pd.jmpInfo.valid && commit_pd.jmpOffset === i.U
    val isCfi = isBr || isJmp
    val isTaken = commit_cfi.valid && commit_cfi.bits === i.U
    val misPred = commit_mispredict(i)
    // val ghist = commit_spec_meta.ghist.predHist
    val histPtr = commit_spec_meta.histPtr
    val predCycle = commit_meta.meta(63, 0)
    val target = commit_target

    val brIdx = OHToUInt(Reverse(Cat(update_ftb_entry.brValids.zip(update_ftb_entry.brOffset).map{case(v, offset) => v && offset === i.U})))
    val inFtbEntry = update_ftb_entry.brValids.zip(update_ftb_entry.brOffset).map{case(v, offset) => v && offset === i.U}.reduce(_||_)
    val addIntoHist = ((commit_hit === h_hit) && inFtbEntry) || ((!(commit_hit === h_hit) && i.U === commit_cfi.bits && isBr && commit_cfi.valid))
    XSDebug(v && do_commit && isCfi, p"cfi_update: isBr(${isBr}) pc(${Hexadecimal(pc)}) " +
      p"taken(${isTaken}) mispred(${misPred}) cycle($predCycle) hist(${histPtr.value}) " +
      p"startAddr(${Hexadecimal(commit_pc_bundle.startAddr)}) AddIntoHist(${addIntoHist}) " +
      p"brInEntry(${inFtbEntry}) brIdx(${brIdx}) target(${Hexadecimal(target)})\n")
  }

  val enq = io.fromBpu.resp
  val perf_redirect = backendRedirect

  XSPerfAccumulate("entry", validEntries)
  XSPerfAccumulate("bpu_to_ftq_stall", enq.valid && !enq.ready)
  XSPerfAccumulate("mispredictRedirect", perf_redirect.valid && RedirectLevel.flushAfter === perf_redirect.bits.level)
  XSPerfAccumulate("replayRedirect", perf_redirect.valid && RedirectLevel.flushItself(perf_redirect.bits.level))
  XSPerfAccumulate("predecodeRedirect", fromIfuRedirect.valid)

  XSPerfAccumulate("to_ifu_bubble", io.toIfu.req.ready && !io.toIfu.req.valid)

  XSPerfAccumulate("to_ifu_stall", io.toIfu.req.valid && !io.toIfu.req.ready)
  XSPerfAccumulate("from_bpu_real_bubble", !enq.valid && enq.ready && allowBpuIn)
  XSPerfAccumulate("bpu_to_ifu_bubble", bpuPtr === ifuPtr)

  val from_bpu = io.fromBpu.resp.bits
  def in_entry_len_map_gen(resp: BranchPredictionBundle)(stage: String) = {
    assert(!resp.is_minimal)
    val entry_len = (resp.ftb_entry.getFallThrough(resp.pc) - resp.pc) >> instOffsetBits
    val entry_len_recording_vec = (1 to PredictWidth+1).map(i => entry_len === i.U)
    val entry_len_map = (1 to PredictWidth+1).map(i =>
      f"${stage}_ftb_entry_len_$i" -> (entry_len_recording_vec(i-1) && resp.valid)
    ).foldLeft(Map[String, UInt]())(_+_)
    entry_len_map
  }
  val s2_entry_len_map = in_entry_len_map_gen(from_bpu.s2)("s2")
  val s3_entry_len_map = in_entry_len_map_gen(from_bpu.s3)("s3")

  val to_ifu = io.toIfu.req.bits



  val commit_num_inst_recording_vec = (1 to PredictWidth).map(i => PopCount(commit_inst_mask) === i.U)
  val commit_num_inst_map = (1 to PredictWidth).map(i =>
    f"commit_num_inst_$i" -> (commit_num_inst_recording_vec(i-1) && do_commit)
  ).foldLeft(Map[String, UInt]())(_+_)



  val commit_jal_mask  = UIntToOH(commit_pd.jmpOffset) & Fill(PredictWidth, commit_pd.hasJal.asTypeOf(UInt(1.W)))
  val commit_jalr_mask = UIntToOH(commit_pd.jmpOffset) & Fill(PredictWidth, commit_pd.hasJalr.asTypeOf(UInt(1.W)))
  val commit_call_mask = UIntToOH(commit_pd.jmpOffset) & Fill(PredictWidth, commit_pd.hasCall.asTypeOf(UInt(1.W)))
  val commit_ret_mask  = UIntToOH(commit_pd.jmpOffset) & Fill(PredictWidth, commit_pd.hasRet.asTypeOf(UInt(1.W)))


  val mbpBRights = mbpRights & commit_br_mask
  val mbpJRights = mbpRights & commit_jal_mask
  val mbpIRights = mbpRights & commit_jalr_mask
  val mbpCRights = mbpRights & commit_call_mask
  val mbpRRights = mbpRights & commit_ret_mask

  val mbpBWrongs = mbpWrongs & commit_br_mask
  val mbpJWrongs = mbpWrongs & commit_jal_mask
  val mbpIWrongs = mbpWrongs & commit_jalr_mask
  val mbpCWrongs = mbpWrongs & commit_call_mask
  val mbpRWrongs = mbpWrongs & commit_ret_mask

  val commit_pred_stage = RegNext(pred_stage(commPtr.value))

  def pred_stage_map(src: UInt, name: String) = {
    (0 until numBpStages).map(i =>
      f"${name}_stage_${i+1}" -> PopCount(src.asBools.map(_ && commit_pred_stage === BP_STAGES(i)))
    ).foldLeft(Map[String, UInt]())(_+_)
  }

  val mispred_stage_map      = pred_stage_map(mbpWrongs,  "mispredict")
  val br_mispred_stage_map   = pred_stage_map(mbpBWrongs, "br_mispredict")
  val jalr_mispred_stage_map = pred_stage_map(mbpIWrongs, "jalr_mispredict")
  val correct_stage_map      = pred_stage_map(mbpRights,  "correct")
  val br_correct_stage_map   = pred_stage_map(mbpBRights, "br_correct")
  val jalr_correct_stage_map = pred_stage_map(mbpIRights, "jalr_correct")

  val update_valid = io.toBpu.update.valid
  def u(cond: Bool) = update_valid && cond
  val ftb_false_hit = u(update.false_hit)
  // assert(!ftb_false_hit)
  val ftb_hit = u(commit_hit === h_hit)

  val ftb_new_entry = u(ftbEntryGen.is_init_entry)
  val ftb_new_entry_only_br = ftb_new_entry && !update_ftb_entry.jmpValid
  val ftb_new_entry_only_jmp = ftb_new_entry && !update_ftb_entry.brValids(0)
  val ftb_new_entry_has_br_and_jmp = ftb_new_entry && update_ftb_entry.brValids(0) && update_ftb_entry.jmpValid

  val ftb_old_entry = u(ftbEntryGen.is_old_entry)

  val ftb_modified_entry = u(ftbEntryGen.is_new_br || ftbEntryGen.is_jalr_target_modified || ftbEntryGen.is_always_taken_modified)
  val ftb_modified_entry_new_br = u(ftbEntryGen.is_new_br)
  val ftb_modified_entry_jalr_target_modified = u(ftbEntryGen.is_jalr_target_modified)
  val ftb_modified_entry_br_full = ftb_modified_entry && ftbEntryGen.is_br_full
  val ftb_modified_entry_always_taken = ftb_modified_entry && ftbEntryGen.is_always_taken_modified

  val ftb_entry_len = (ftbEntryGen.new_entry.getFallThrough(update.pc) - update.pc) >> instOffsetBits
  val ftb_entry_len_recording_vec = (1 to PredictWidth+1).map(i => ftb_entry_len === i.U)
  val ftb_init_entry_len_map = (1 to PredictWidth+1).map(i =>
    f"ftb_init_entry_len_$i" -> (ftb_entry_len_recording_vec(i-1) && ftb_new_entry)
  ).foldLeft(Map[String, UInt]())(_+_)
  val ftb_modified_entry_len_map = (1 to PredictWidth+1).map(i =>
    f"ftb_modified_entry_len_$i" -> (ftb_entry_len_recording_vec(i-1) && ftb_modified_entry)
  ).foldLeft(Map[String, UInt]())(_+_)

  val ftq_occupancy_map = (0 to FtqSize).map(i =>
    f"ftq_has_entry_$i" -> (validEntries === i.U)
  ).foldLeft(Map[String, UInt]())(_+_)

  val perfCountsMap = Map(
    "BpInstr" -> PopCount(mbpInstrs),
    "BpBInstr" -> PopCount(mbpBRights | mbpBWrongs),
    "BpRight" -> PopCount(mbpRights),
    "BpWrong" -> PopCount(mbpWrongs),
    "BpBRight" -> PopCount(mbpBRights),
    "BpBWrong" -> PopCount(mbpBWrongs),
    "BpJRight" -> PopCount(mbpJRights),
    "BpJWrong" -> PopCount(mbpJWrongs),
    "BpIRight" -> PopCount(mbpIRights),
    "BpIWrong" -> PopCount(mbpIWrongs),
    "BpCRight" -> PopCount(mbpCRights),
    "BpCWrong" -> PopCount(mbpCWrongs),
    "BpRRight" -> PopCount(mbpRRights),
    "BpRWrong" -> PopCount(mbpRWrongs),

    "ftb_false_hit" -> PopCount(ftb_false_hit),
    "ftb_hit" -> PopCount(ftb_hit),
    "ftb_new_entry" -> PopCount(ftb_new_entry),
    "ftb_new_entry_only_br" -> PopCount(ftb_new_entry_only_br),
    "ftb_new_entry_only_jmp" -> PopCount(ftb_new_entry_only_jmp),
    "ftb_new_entry_has_br_and_jmp" -> PopCount(ftb_new_entry_has_br_and_jmp),
    "ftb_old_entry" -> PopCount(ftb_old_entry),
    "ftb_modified_entry" -> PopCount(ftb_modified_entry),
    "ftb_modified_entry_new_br" -> PopCount(ftb_modified_entry_new_br),
    "ftb_jalr_target_modified" -> PopCount(ftb_modified_entry_jalr_target_modified),
    "ftb_modified_entry_br_full" -> PopCount(ftb_modified_entry_br_full),
    "ftb_modified_entry_always_taken" -> PopCount(ftb_modified_entry_always_taken)
  ) ++ ftb_init_entry_len_map ++ ftb_modified_entry_len_map ++ s2_entry_len_map ++
    s3_entry_len_map ++ commit_num_inst_map ++ ftq_occupancy_map ++
    mispred_stage_map ++ br_mispred_stage_map ++ jalr_mispred_stage_map ++
    correct_stage_map ++ br_correct_stage_map ++ jalr_correct_stage_map

  for((key, value) <- perfCountsMap) {
    XSPerfAccumulate(key, value)
  }

  // --------------------------- Debug --------------------------------
  // XSDebug(enq_fire, p"enq! " + io.fromBpu.resp.bits.toPrintable)
  XSDebug(io.toIfu.req.fire, p"fire to ifu " + io.toIfu.req.bits.toPrintable)
  XSDebug(do_commit, p"deq! [ptr] $do_commit_ptr\n")
  XSDebug(true.B, p"[bpuPtr] $bpuPtr, [ifuPtr] $ifuPtr, [ifuWbPtr] $ifuWbPtr [commPtr] $commPtr\n")
  XSDebug(true.B, p"[in] v:${io.fromBpu.resp.valid} r:${io.fromBpu.resp.ready} " +
    p"[out] v:${io.toIfu.req.valid} r:${io.toIfu.req.ready}\n")
  XSDebug(do_commit, p"[deq info] cfiIndex: $commit_cfi, $commit_pc_bundle, target: ${Hexadecimal(commit_target)}\n")

  // def ubtbCheck(commit: FtqEntry, predAns: Seq[PredictorAnswer], isWrong: Bool) = {
  //   commit.valids.zip(commit.pd).zip(predAns).zip(commit.takens).map {
  //     case (((valid, pd), ans), taken) =>
  //       Mux(valid && pd.isBr,
  //         isWrong ^ Mux(ans.hit.asBool,
  //           Mux(ans.taken.asBool, taken && ans.target === commitEntry.target,
  //             !taken),
  //           !taken),
  //         false.B)
  //   }
  // }

  // def btbCheck(commit: FtqEntry, predAns: Seq[PredictorAnswer], isWrong: Bool) = {
  //   commit.valids.zip(commit.pd).zip(predAns).zip(commit.takens).map {
  //     case (((valid, pd), ans), taken) =>
  //       Mux(valid && pd.isBr,
  //         isWrong ^ Mux(ans.hit.asBool,
  //           Mux(ans.taken.asBool, taken && ans.target === commitEntry.target,
  //             !taken),
  //           !taken),
  //         false.B)
  //   }
  // }

  // def tageCheck(commit: FtqEntry, predAns: Seq[PredictorAnswer], isWrong: Bool) = {
  //   commit.valids.zip(commit.pd).zip(predAns).zip(commit.takens).map {
  //     case (((valid, pd), ans), taken) =>
  //       Mux(valid && pd.isBr,
  //         isWrong ^ (ans.taken.asBool === taken),
  //         false.B)
  //   }
  // }

  // def loopCheck(commit: FtqEntry, predAns: Seq[PredictorAnswer], isWrong: Bool) = {
  //   commit.valids.zip(commit.pd).zip(predAns).zip(commit.takens).map {
  //     case (((valid, pd), ans), taken) =>
  //       Mux(valid && (pd.isBr) && ans.hit.asBool,
  //         isWrong ^ (!taken),
  //         false.B)
  //   }
  // }

  // def rasCheck(commit: FtqEntry, predAns: Seq[PredictorAnswer], isWrong: Bool) = {
  //   commit.valids.zip(commit.pd).zip(predAns).zip(commit.takens).map {
  //     case (((valid, pd), ans), taken) =>
  //       Mux(valid && pd.isRet.asBool /*&& taken*/ && ans.hit.asBool,
  //         isWrong ^ (ans.target === commitEntry.target),
  //         false.B)
  //   }
  // }

  // val ubtbRights = ubtbCheck(commitEntry, commitEntry.metas.map(_.ubtbAns), false.B)
  // val ubtbWrongs = ubtbCheck(commitEntry, commitEntry.metas.map(_.ubtbAns), true.B)
  // // btb and ubtb pred jal and jalr as well
  // val btbRights = btbCheck(commitEntry, commitEntry.metas.map(_.btbAns), false.B)
  // val btbWrongs = btbCheck(commitEntry, commitEntry.metas.map(_.btbAns), true.B)
  // val tageRights = tageCheck(commitEntry, commitEntry.metas.map(_.tageAns), false.B)
  // val tageWrongs = tageCheck(commitEntry, commitEntry.metas.map(_.tageAns), true.B)

  // val loopRights = loopCheck(commitEntry, commitEntry.metas.map(_.loopAns), false.B)
  // val loopWrongs = loopCheck(commitEntry, commitEntry.metas.map(_.loopAns), true.B)

  // val rasRights = rasCheck(commitEntry, commitEntry.metas.map(_.rasAns), false.B)
  // val rasWrongs = rasCheck(commitEntry, commitEntry.metas.map(_.rasAns), true.B)

  val perfEvents = Seq(
    ("bpu_s2_redirect        ", bpu_s2_redirect),
    ("bpu_s3_redirect        ", bpu_s3_redirect),
    ("bpu_to_ftq_stall       ", enq.valid && ~enq.ready),
    ("mispredictRedirect     ", perf_redirect.valid && RedirectLevel.flushAfter === perf_redirect.bits.level),
    ("replayRedirect         ", perf_redirect.valid && RedirectLevel.flushItself(perf_redirect.bits.level)),
    ("predecodeRedirect      ", fromIfuRedirect.valid),
    ("to_ifu_bubble          ", io.toIfu.req.ready && !io.toIfu.req.valid),
    ("from_bpu_real_bubble   ", !enq.valid && enq.ready && allowBpuIn),
    ("BpInstr                ", PopCount(mbpInstrs)),
    ("BpBInstr               ", PopCount(mbpBRights | mbpBWrongs)),
    ("BpRight                ", PopCount(mbpRights)),
    ("BpWrong                ", PopCount(mbpWrongs)),
    ("BpBRight               ", PopCount(mbpBRights)),
    ("BpBWrong               ", PopCount(mbpBWrongs)),
    ("BpJRight               ", PopCount(mbpJRights)),
    ("BpJWrong               ", PopCount(mbpJWrongs)),
    ("BpIRight               ", PopCount(mbpIRights)),
    ("BpIWrong               ", PopCount(mbpIWrongs)),
    ("BpCRight               ", PopCount(mbpCRights)),
    ("BpCWrong               ", PopCount(mbpCWrongs)),
    ("BpRRight               ", PopCount(mbpRRights)),
    ("BpRWrong               ", PopCount(mbpRWrongs)),
    ("ftb_false_hit          ", PopCount(ftb_false_hit)),
    ("ftb_hit                ", PopCount(ftb_hit)),
  )
  generatePerfEvent()
}