/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.frontend

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.experimental.chiselName
import chisel3.util._
import xiangshan._
import utils._

import scala.math.min

trait HasBPUConst extends HasXSParameter {
  val MaxMetaLength = 512 // TODO: Reduce meta length
  val MaxBasicBlockSize = 32
  val LHistoryLength = 32
  // val numBr = 2
  val useBPD = true
  val useLHist = true
  val numBrSlot = numBr - 1
  val totalSlot = numBrSlot + 1

  def BP_STAGES = (0 until 3).map(_.U(2.W))
  def BP_S1 = BP_STAGES(0)
  def BP_S2 = BP_STAGES(1)
  def BP_S3 = BP_STAGES(2)
  val numBpStages = BP_STAGES.length

  val debug = true
  val resetVector = 0x10000000L
  // TODO: Replace log2Up by log2Ceil
}

trait HasBPUParameter extends HasXSParameter with HasBPUConst {
  val BPUDebug = true && !env.FPGAPlatform && env.EnablePerfDebug
  val EnableCFICommitLog = true
  val EnableCFIPredLog = true
  val EnableBPUTimeRecord = (EnableCFICommitLog || EnableCFIPredLog) && !env.FPGAPlatform
  val EnableCommit = false
}

class BPUCtrl(implicit p: Parameters) extends XSBundle {
  val ubtb_enable = Bool()
  val btb_enable  = Bool()
  val bim_enable  = Bool()
  val tage_enable = Bool()
  val sc_enable   = Bool()
  val ras_enable  = Bool()
  val loop_enable = Bool()
}

trait BPUUtils extends HasXSParameter {
  // circular shifting
  def circularShiftLeft(source: UInt, len: Int, shamt: UInt): UInt = {
    val res = Wire(UInt(len.W))
    val higher = source << shamt
    val lower = source >> (len.U - shamt)
    res := higher | lower
    res
  }

  def circularShiftRight(source: UInt, len: Int, shamt: UInt): UInt = {
    val res = Wire(UInt(len.W))
    val higher = source << (len.U - shamt)
    val lower = source >> shamt
    res := higher | lower
    res
  }

  // Unsigned saturating counter update: stick at 0 and (1 << len) - 1.
  // To be verified.
  def satUpdate(old: UInt, len: Int, taken: Bool): UInt = {
    val oldSatTaken = old === ((1 << len) - 1).U
    val oldSatNotTaken = old === 0.U
    Mux(oldSatTaken && taken, ((1 << len) - 1).U,
      Mux(oldSatNotTaken && !taken, 0.U,
        Mux(taken, old + 1.U, old - 1.U)))
  }

  // Signed variant: saturate at -(2^(len-1)) and 2^(len-1) - 1.
  def signedSatUpdate(old: SInt, len: Int, taken: Bool): SInt = {
    val oldSatTaken = old === ((1 << (len - 1)) - 1).S
    val oldSatNotTaken = old === (-(1 << (len - 1))).S
    Mux(oldSatTaken && taken, ((1 << (len - 1)) - 1).S,
      Mux(oldSatNotTaken && !taken, (-(1 << (len - 1))).S,
        Mux(taken, old + 1.S, old - 1.S)))
  }

  // Concatenate the (possibly carried) upper PC bits with the partial
  // fall-through offset to form the full fall-through address.
  def getFallThroughAddr(start: UInt, carry: Bool, pft: UInt) = {
    val higher = start.head(VAddrBits - log2Ceil(PredictWidth) - instOffsetBits - 1)
    Cat(Mux(carry, higher + 1.U, higher), pft, 0.U(instOffsetBits.W))
  }

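  // Illustrative example for foldTag below (made-up values, not taken from
  // any table configuration): folding a 10-bit tag 0b10_1100_0101 into
  // l = 4 bits yields the chunks 0b0101 (bits 3:0), 0b1100 (bits 7:4) and
  // 0b10 (bits 9:8, zero-extended in the XOR), so the folded tag is
  // 0b0101 ^ 0b1100 ^ 0b0010 = 0b1011.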
  // Fold a wide tag into l bits by XOR-ing its l-bit chunks.
  def foldTag(tag: UInt, l: Int): UInt = {
    val nChunks = (tag.getWidth + l - 1) / l
    val chunks = (0 until nChunks).map { i =>
      tag(min((i+1)*l, tag.getWidth)-1, i*l)
    }
    ParallelXOR(chunks)
  }
}

// class BranchPredictionUpdate(implicit p: Parameters) extends XSBundle with HasBPUConst {
//   val pc = UInt(VAddrBits.W)
//   val br_offset = Vec(num_br, UInt(log2Up(MaxBasicBlockSize).W))
//   val br_mask = Vec(MaxBasicBlockSize, Bool())
//
//   val jmp_valid = Bool()
//   val jmp_type = UInt(3.W)
//
//   val is_NextMask = Vec(FetchWidth*2, Bool())
//
//   val cfi_idx = Valid(UInt(log2Ceil(MaxBasicBlockSize).W))
//   val cfi_mispredict = Bool()
//   val cfi_is_br = Bool()
//   val cfi_is_jal = Bool()
//   val cfi_is_jalr = Bool()
//
//   val ghist = new ShiftingGlobalHistory()
//
//   val target = UInt(VAddrBits.W)
//
//   val meta = UInt(MaxMetaLength.W)
//   val spec_meta = UInt(MaxMetaLength.W)
//
//   def taken = cfi_idx.valid
// }

class AllFoldedHistories(val gen: Seq[Tuple2[Int, Int]])(implicit p: Parameters) extends XSBundle with HasBPUConst {
  val hist = MixedVec(gen.map{case (l, cl) => new FoldedHistory(l, cl, numBr)})
  // println(gen.mkString)
  require(gen.toSet.toList.equals(gen)) // no duplicated (histLen, compLen) configs
  def getHistWithInfo(info: Tuple2[Int, Int]) = {
    val selected = hist.filter(_.info.equals(info))
    require(selected.length == 1)
    selected(0)
  }
  def autoConnectFrom(that: AllFoldedHistories) = {
    require(this.hist.length <= that.hist.length)
    for (h <- this.hist) {
      h := that.getHistWithInfo(h.info)
    }
  }
  def update(ghv: Vec[Bool], ptr: CGHPtr, shift: Int, taken: Bool): AllFoldedHistories = {
    val res = WireInit(this)
    for (i <- 0 until this.hist.length) {
      res.hist(i) := this.hist(i).update(ghv, ptr, shift, taken)
    }
    res
  }

  def display(cond: Bool) = {
    for (h <- hist) {
      XSDebug(cond, p"hist len ${h.len}, folded len ${h.compLen}, value ${Binary(h.folded_hist)}\n")
    }
  }
}

class BasePredictorInput (implicit p: Parameters) extends XSBundle with HasBPUConst {
  def nInputs = 1

  val s0_pc = UInt(VAddrBits.W)

  val folded_hist = new AllFoldedHistories(foldedGHistInfos)
  val ghist = UInt(HistoryLength.W)

  val resp_in = Vec(nInputs, new BranchPredictionResp)

  // val final_preds = Vec(numBpStages, new)
  // val toFtq_fire = Bool()

  // val s0_all_ready = Bool()
}

class BasePredictorOutput (implicit p: Parameters) extends XSBundle with HasBPUConst {
  val last_stage_meta = UInt(MaxMetaLength.W) // This is used by the composer
  val resp = new BranchPredictionResp

  // These are stored in meta and extracted in the composer
  // val rasSp = UInt(log2Ceil(RasSize).W)
  // val rasTop = new RASEntry
  // val specCnt = Vec(PredictWidth, UInt(10.W))
}

class BasePredictorIO (implicit p: Parameters) extends XSBundle with HasBPUConst {
  val in = Flipped(DecoupledIO(new BasePredictorInput)) // TODO: Remove DecoupledIO
  // val out = DecoupledIO(new BasePredictorOutput)
  val out = Output(new BasePredictorOutput)
  // val flush_out = Valid(UInt(VAddrBits.W))

  // val ctrl = Input(new BPUCtrl())

  val s0_fire = Input(Bool())
  val s1_fire = Input(Bool())
  val s2_fire = Input(Bool())
  val s3_fire = Input(Bool())

  val s1_ready = Output(Bool())
  val s2_ready = Output(Bool())
  val s3_ready = Output(Bool())

  val update = Flipped(Valid(new BranchPredictionUpdate))
  val redirect = Flipped(Valid(new BranchPredictionRedirect))
}
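// A minimal sketch of a component predictor built on this interface (the
// class below is hypothetical, for illustration only; real components are
// aggregated by the Composer in its own file):
//
//   class PassThroughPredictor(implicit p: Parameters) extends BasePredictor {
//     // BasePredictor already forwards resp_in(0) to io.out.resp and emits
//     // an all-zero last_stage_meta, so a pure pass-through adds no logic.
//   }
//
// Components with per-stage state use io.s0_fire..io.s3_fire as pipeline
// advance enables, and pull io.s1_ready..io.s3_ready low to stall the BPU
// pipeline when they cannot serve a new request.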
abstract class BasePredictor(implicit p: Parameters) extends XSModule
  with HasBPUConst with BPUUtils with HasPerfEvents {
  val meta_size = 0
  val spec_meta_size = 0
  val io = IO(new BasePredictorIO())

  io.out.resp := io.in.bits.resp_in(0)

  io.out.last_stage_meta := 0.U

  io.in.ready := !io.redirect.valid

  io.s1_ready := true.B
  io.s2_ready := true.B
  io.s3_ready := true.B

  val s0_pc = WireInit(io.in.bits.s0_pc) // fetchIdx(io.f0_pc)
  val s1_pc = RegEnable(s0_pc, resetVector.U, io.s0_fire)
  val s2_pc = RegEnable(s1_pc, io.s1_fire)
  val s3_pc = RegEnable(s2_pc, io.s2_fire)

  io.out.resp.s1.pc := s1_pc
  io.out.resp.s2.pc := s2_pc
  io.out.resp.s3.pc := s3_pc

  val perfEvents: Seq[(String, UInt)] = Seq()

  def getFoldedHistoryInfo: Option[Set[FoldedHistoryInfo]] = None
}

class FakePredictor(implicit p: Parameters) extends BasePredictor {
  io.in.ready           := true.B
  io.out.last_stage_meta := 0.U
  io.out.resp           := io.in.bits.resp_in(0)
}

class BpuToFtqIO(implicit p: Parameters) extends XSBundle {
  val resp = DecoupledIO(new BpuToFtqBundle())
}

class PredictorIO(implicit p: Parameters) extends XSBundle {
  val bpu_to_ftq = new BpuToFtqIO()
  val ftq_to_bpu = Flipped(new FtqToBpuIO())
}

@chiselName
class Predictor(implicit p: Parameters) extends XSModule with HasBPUConst with HasPerfEvents {
  val io = IO(new PredictorIO)

  val predictors = Module(if (useBPD) new Composer else new FakePredictor)

  val folded_hist_infos = predictors.getFoldedHistoryInfo.getOrElse(Set()).toList
  for ((len, compLen) <- folded_hist_infos) {
    println(f"folded hist info: len $len, compLen $compLen")
  }

  val s0_fire, s1_fire, s2_fire, s3_fire = Wire(Bool())
  val s1_valid, s2_valid, s3_valid = RegInit(false.B)
  val s1_ready, s2_ready, s3_ready = Wire(Bool())
  val s1_components_ready, s2_components_ready, s3_components_ready = Wire(Bool())

  val s0_pc = WireInit(resetVector.U)
  val s0_pc_reg = RegNext(s0_pc, init=resetVector.U)
  val s1_pc = RegEnable(s0_pc, s0_fire)
  val s2_pc = RegEnable(s1_pc, s1_fire)
  val s3_pc = RegEnable(s2_pc, s2_fire)

  val s0_folded_gh = Wire(new AllFoldedHistories(foldedGHistInfos))
  val s0_folded_gh_reg = RegNext(s0_folded_gh, init=0.U.asTypeOf(s0_folded_gh))
  val s1_folded_gh = RegEnable(s0_folded_gh, 0.U.asTypeOf(s0_folded_gh), s0_fire)
  val s2_folded_gh = RegEnable(s1_folded_gh, 0.U.asTypeOf(s0_folded_gh), s1_fire)
  val s3_folded_gh = RegEnable(s2_folded_gh, 0.U.asTypeOf(s0_folded_gh), s2_fire)

  val npcGen = new PhyPriorityMuxGenerator[UInt]
  val foldedGhGen = new PhyPriorityMuxGenerator[AllFoldedHistories]
  val ghistPtrGen = new PhyPriorityMuxGenerator[CGHPtr]
  val ghvBitWriteGens = Seq.tabulate(HistoryLength)(n => new PhyPriorityMuxGenerator[Bool])
  // val ghistGen = new PhyPriorityMuxGenerator[UInt]

  val ghv = RegInit(0.U.asTypeOf(Vec(HistoryLength, Bool())))
  val ghv_wire = WireInit(ghv)

  val s0_ghist = WireInit(0.U.asTypeOf(UInt(HistoryLength.W)))

  val ghv_write_datas = Wire(Vec(HistoryLength, Bool()))
  val ghv_wens = Wire(Vec(HistoryLength, Bool()))
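  // The global history lives in the circular buffer ghv, addressed by CGHPtr
  // pointers. getHist (defined below) linearizes it: concatenating two copies
  // of ghv and shifting right by (ptr + 1) rotates the buffer so that the
  // entry just behind the pointer lands at bit 0. Worked example with assumed
  // values (HistoryLength = 8, ptr.value = 2): bit 0 of the result is ghv(3),
  // bit 4 is ghv(7), and bit 7 wraps around to ghv(2).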
  val s0_ghist_ptr = Wire(new CGHPtr)
  val s0_ghist_ptr_reg = RegNext(s0_ghist_ptr, init=0.U.asTypeOf(new CGHPtr))
  val s1_ghist_ptr = RegEnable(s0_ghist_ptr, 0.U.asTypeOf(new CGHPtr), s0_fire)
  val s2_ghist_ptr = RegEnable(s1_ghist_ptr, 0.U.asTypeOf(new CGHPtr), s1_fire)
  val s3_ghist_ptr = RegEnable(s2_ghist_ptr, 0.U.asTypeOf(new CGHPtr), s2_fire)

  def getHist(ptr: CGHPtr): UInt = (Cat(ghv_wire.asUInt, ghv_wire.asUInt) >> (ptr.value+1.U))(HistoryLength-1, 0)
  s0_ghist := getHist(s0_ghist_ptr)

  val resp = predictors.io.out.resp

  val toFtq_fire = io.bpu_to_ftq.resp.valid && io.bpu_to_ftq.resp.ready

  val s1_flush, s2_flush, s3_flush = Wire(Bool())
  val s2_redirect, s3_redirect = Wire(Bool())

  // predictors.io := DontCare
  predictors.io.in.valid := s0_fire
  predictors.io.in.bits.s0_pc := s0_pc
  predictors.io.in.bits.ghist := s0_ghist
  predictors.io.in.bits.folded_hist := s0_folded_gh
  predictors.io.in.bits.resp_in(0) := (0.U).asTypeOf(new BranchPredictionResp)
  // predictors.io.in.bits.resp_in(0).s1.pc := s0_pc
  // predictors.io.in.bits.toFtq_fire := toFtq_fire

  // predictors.io.out.ready := io.bpu_to_ftq.resp.ready

  // Pipeline logic
  s2_redirect := false.B
  s3_redirect := false.B

  s3_flush := io.ftq_to_bpu.redirect.valid
  s2_flush := s3_flush || s3_redirect
  s1_flush := s2_flush || s2_redirect

  s1_components_ready := predictors.io.s1_ready
  s1_ready := s1_fire || !s1_valid
  s0_fire := !reset.asBool && s1_components_ready && s1_ready
  predictors.io.s0_fire := s0_fire

  s2_components_ready := predictors.io.s2_ready
  s2_ready := s2_fire || !s2_valid
  s1_fire := s1_valid && s2_components_ready && s2_ready && io.bpu_to_ftq.resp.ready

  s3_components_ready := predictors.io.s3_ready
  s3_ready := s3_fire || !s3_valid
  s2_fire := s2_valid && s3_components_ready && s3_ready

  when(s0_fire)         { s1_valid := true.B  }
    .elsewhen(s1_flush) { s1_valid := false.B }
    .elsewhen(s1_fire)  { s1_valid := false.B }

  predictors.io.s1_fire := s1_fire

  // Note: by Chisel last-connect semantics this overrides the connection to
  // s2_fire above, so s2 ultimately fires whenever s2_valid is set.
  s2_fire := s2_valid

  when(s2_flush)       { s2_valid := false.B }
    .elsewhen(s1_fire) { s2_valid := !s1_flush }
    .elsewhen(s2_fire) { s2_valid := false.B }

  predictors.io.s2_fire := s2_fire

  s3_fire := s3_valid

  when(s3_flush)       { s3_valid := false.B }
    .elsewhen(s2_fire) { s3_valid := !s2_flush }
    .elsewhen(s3_fire) { s3_valid := false.B }

  predictors.io.s3_fire := s3_fire

  io.bpu_to_ftq.resp.valid :=
    s1_valid && s2_components_ready && s2_ready ||
    s2_fire && s2_redirect ||
    s3_fire && s3_redirect
  io.bpu_to_ftq.resp.bits := BpuToFtqBundle(predictors.io.out.resp)
  io.bpu_to_ftq.resp.bits.meta := predictors.io.out.last_stage_meta // TODO: change to lastStageMeta
  io.bpu_to_ftq.resp.bits.s3.folded_hist := s3_folded_gh
  io.bpu_to_ftq.resp.bits.s3.histPtr := s3_ghist_ptr

  npcGen.register(true.B, s0_pc_reg, Some("stallPC"), 0)
  foldedGhGen.register(true.B, s0_folded_gh_reg, Some("stallFGH"), 0)
  ghistPtrGen.register(true.B, s0_ghist_ptr_reg, Some("stallGHPtr"), 0)
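  // The next-cycle s0 state (pc, folded history, history pointer, and each
  // ghv bit) is resolved through PhyPriorityMuxGenerator instances: every
  // producer registers a (valid, data, name, priority) tuple, and calling
  // the generator (e.g. npcGen()) emits one priority mux over all registered
  // sources, with selection order following the generator's priority
  // convention defined in utils. The always-valid "stall" values registered
  // above act as the fallback; s1/s2/s3 predictions, redirect recovery and
  // reset register their own sources below.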
  // History management
  // s1
  val s1_possible_predicted_ghist_ptrs = (0 to numBr).map(s1_ghist_ptr - _.U)
  val s1_predicted_ghist_ptr = Mux1H(resp.s1.lastBrPosOH, s1_possible_predicted_ghist_ptrs)

  val s1_possible_predicted_fhs = (0 to numBr).map(i =>
    s1_folded_gh.update(ghv, s1_ghist_ptr, i, resp.s1.brTaken && resp.s1.lastBrPosOH(i)))
  val s1_predicted_fh = Mux1H(resp.s1.lastBrPosOH, s1_possible_predicted_fhs)

  if (EnableGHistDiff) {
    val s1_predicted_ghist = WireInit(getHist(s1_predicted_ghist_ptr).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when (resp.s1.shouldShiftVec(i)) {
        s1_predicted_ghist(i) := resp.s1.brTaken && (i==0).B
      }
    }
    when (s1_valid) {
      s0_ghist := s1_predicted_ghist.asUInt
    }
  }

  require(isPow2(HistoryLength))
  val s1_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b => (s1_ghist_ptr).value === n.U(log2Ceil(HistoryLength).W) + b.U && resp.s1.shouldShiftVec(b) && s1_valid))
  val s1_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b => (
        (s1_ghist_ptr).value === n.U(log2Ceil(HistoryLength).W) + b.U && resp.s1.shouldShiftVec(b),
        resp.s1.brTaken && resp.s1.lastBrPosOH(b+1)
      ))
    )
  )

  XSError(!resp.s1.is_minimal, "s1 should be minimal!\n")

  npcGen.register(s1_valid, resp.s1.getTarget, Some("s1_target"), 4)
  foldedGhGen.register(s1_valid, s1_predicted_fh, Some("s1_FGH"), 4)
  ghistPtrGen.register(s1_valid, s1_predicted_ghist_ptr, Some("s1_GHPtr"), 4)
  ghvBitWriteGens.zip(s1_ghv_wens).zipWithIndex.map{case ((b, w), i) =>
    b.register(w.reduce(_||_), s1_ghv_wdatas(i), Some(s"s1_new_bit_$i"), 4)
  }
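  // How folded histories stay cheap to update (illustrative numbers; the
  // real (histLen, compLen) pairs come from foldedGHistInfos): a component
  // indexing with an 8-bit hash of a 64-bit history keeps a (64, 8)
  // FoldedHistory, conceptually the XOR of the eight 8-bit chunks of that
  // history. FoldedHistory.update does not re-fold all 64 bits; it XORs the
  // newly shifted-in bits into one end and XORs out the bits that age out of
  // the oldest chunk, so each stage can derive its speculative folded
  // history in a single cycle, as done for s1 above and for s2/s3 below.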
  def preds_needs_redirect_vec(x: BranchPredictionBundle, y: BranchPredictionBundle) = {
    VecInit(
      x.getTarget =/= y.getTarget,
      x.lastBrPosOH.asUInt =/= y.lastBrPosOH.asUInt,
      x.taken =/= y.taken,
      (x.taken && y.taken) && x.cfiIndex.bits =/= y.cfiIndex.bits,
      (!x.taken && !y.taken) && x.oversize =/= y.oversize
      // x.shouldShiftVec.asUInt =/= y.shouldShiftVec.asUInt,
      // x.brTaken =/= y.brTaken
    )
  }

  // s2
  val s2_possible_predicted_ghist_ptrs = (0 to numBr).map(s2_ghist_ptr - _.U)
  val s2_predicted_ghist_ptr = Mux1H(resp.s2.lastBrPosOH, s2_possible_predicted_ghist_ptrs)

  val s2_possible_predicted_fhs = (0 to numBr).map(i =>
    s2_folded_gh.update(ghv, s2_ghist_ptr, i, if (i > 0) resp.s2.full_pred.br_taken_mask(i-1) else false.B))
  val s2_predicted_fh = Mux1H(resp.s2.lastBrPosOH, s2_possible_predicted_fhs)

  if (EnableGHistDiff) {
    val s2_predicted_ghist = WireInit(getHist(s2_predicted_ghist_ptr).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when (resp.s2.shouldShiftVec(i)) {
        s2_predicted_ghist(i) := resp.s2.brTaken && (i==0).B
      }
    }
    when(s2_redirect) {
      s0_ghist := s2_predicted_ghist.asUInt
    }
  }

  val s2_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b => (s2_ghist_ptr).value === n.U(log2Ceil(HistoryLength).W) + b.U && resp.s2.shouldShiftVec(b) && s2_redirect))
  val s2_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b => (
        (s2_ghist_ptr).value === n.U(log2Ceil(HistoryLength).W) + b.U && resp.s2.shouldShiftVec(b),
        resp.s2.full_pred.real_br_taken_mask()(b)
      ))
    )
  )

  val previous_s1_pred = RegEnable(resp.s1, init=0.U.asTypeOf(resp.s1), s1_fire)

  val s2_redirect_s1_last_pred_vec = preds_needs_redirect_vec(previous_s1_pred, resp.s2)

  s2_redirect := s2_fire && (s2_redirect_s1_last_pred_vec.reduce(_||_) || resp.s2.fallThruError)

  XSError(resp.s2.is_minimal, "s2 should not be minimal!\n")

  npcGen.register(s2_redirect, resp.s2.getTarget, Some("s2_target"), 5)
  foldedGhGen.register(s2_redirect, s2_predicted_fh, Some("s2_FGH"), 5)
  ghistPtrGen.register(s2_redirect, s2_predicted_ghist_ptr, Some("s2_GHPtr"), 5)
  ghvBitWriteGens.zip(s2_ghv_wens).zipWithIndex.map{case ((b, w), i) =>
    b.register(w.reduce(_||_), s2_ghv_wdatas(i), Some(s"s2_new_bit_$i"), 5)
  }

  XSPerfAccumulate("s2_redirect_because_target_diff", s2_fire && s2_redirect_s1_last_pred_vec(0))
  XSPerfAccumulate("s2_redirect_because_branch_num_diff", s2_fire && s2_redirect_s1_last_pred_vec(1))
  XSPerfAccumulate("s2_redirect_because_direction_diff", s2_fire && s2_redirect_s1_last_pred_vec(2))
  XSPerfAccumulate("s2_redirect_because_cfi_idx_diff", s2_fire && s2_redirect_s1_last_pred_vec(3))
  // XSPerfAccumulate("s2_redirect_because_shouldShiftVec_diff", s2_fire && s2_redirect_s1_last_pred_vec(4))
  // XSPerfAccumulate("s2_redirect_because_brTaken_diff", s2_fire && s2_redirect_s1_last_pred_vec(5))
  XSPerfAccumulate("s2_redirect_because_fallThroughError", s2_fire && resp.s2.fallThruError)

  XSPerfAccumulate("s2_redirect_when_taken", s2_redirect && resp.s2.taken && resp.s2.full_pred.hit)
  XSPerfAccumulate("s2_redirect_when_not_taken", s2_redirect && !resp.s2.taken && resp.s2.full_pred.hit)
  XSPerfAccumulate("s2_redirect_when_not_hit", s2_redirect && !resp.s2.full_pred.hit)
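  // s3 repeats the same pattern one stage later: recompute the predicted
  // history pointer and folded history from the s3 response, and redirect
  // when s3's actual branch-taken mask disagrees with what s2 sent out on
  // the previous cycle.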
  // s3
  val s3_possible_predicted_ghist_ptrs = (0 to numBr).map(s3_ghist_ptr - _.U)
  val s3_predicted_ghist_ptr = Mux1H(resp.s3.lastBrPosOH, s3_possible_predicted_ghist_ptrs)

  val s3_possible_predicted_fhs = (0 to numBr).map(i =>
    s3_folded_gh.update(ghv, s3_ghist_ptr, i, if (i > 0) resp.s3.full_pred.br_taken_mask(i-1) else false.B))
  val s3_predicted_fh = Mux1H(resp.s3.lastBrPosOH, s3_possible_predicted_fhs)

  if (EnableGHistDiff) {
    val s3_predicted_ghist = WireInit(getHist(s3_predicted_ghist_ptr).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when (resp.s3.shouldShiftVec(i)) {
        s3_predicted_ghist(i) := resp.s3.brTaken && (i==0).B
      }
    }
    when(s3_redirect) {
      s0_ghist := s3_predicted_ghist.asUInt
    }
  }

  val s3_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b => (s3_ghist_ptr).value === n.U(log2Ceil(HistoryLength).W) + b.U && resp.s3.shouldShiftVec(b) && s3_redirect))
  val s3_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b => (
        (s3_ghist_ptr).value === n.U(log2Ceil(HistoryLength).W) + b.U && resp.s3.shouldShiftVec(b),
        resp.s3.full_pred.real_br_taken_mask()(b)
      ))
    )
  )

  val previous_s2_pred = RegEnable(resp.s2, init=0.U.asTypeOf(resp.s2), s2_fire)

  // TODO: this comparison vector is computed but not yet used for the s3
  // redirect decision below.
  val s3_redirect_s2_last_pred_vec = preds_needs_redirect_vec(previous_s2_pred, resp.s3)

  s3_redirect := s3_fire && !previous_s2_pred.fallThruError && (
    resp.s3.full_pred.real_br_taken_mask().asUInt =/= previous_s2_pred.full_pred.real_br_taken_mask().asUInt
  )

  npcGen.register(s3_redirect, resp.s3.getTarget, Some("s3_target"), 3)
  foldedGhGen.register(s3_redirect, s3_predicted_fh, Some("s3_FGH"), 3)
  ghistPtrGen.register(s3_redirect, s3_predicted_ghist_ptr, Some("s3_GHPtr"), 3)
  ghvBitWriteGens.zip(s3_ghv_wens).zipWithIndex.map{case ((b, w), i) =>
    b.register(w.reduce(_||_), s3_ghv_wdatas(i), Some(s"s3_new_bit_$i"), 3)
  }

  // Send signals to tell FTQ to override its entry
  val s2_ftq_idx = RegEnable(io.ftq_to_bpu.enq_ptr, s1_fire)
  val s3_ftq_idx = RegEnable(s2_ftq_idx, s2_fire)

  io.bpu_to_ftq.resp.bits.s1.valid := s1_fire && !s1_flush
  io.bpu_to_ftq.resp.bits.s1.hasRedirect := false.B
  io.bpu_to_ftq.resp.bits.s1.ftq_idx := DontCare
  io.bpu_to_ftq.resp.bits.s2.valid := s2_fire && !s2_flush
  io.bpu_to_ftq.resp.bits.s2.hasRedirect := s2_redirect
  io.bpu_to_ftq.resp.bits.s2.ftq_idx := s2_ftq_idx
  io.bpu_to_ftq.resp.bits.s3.valid := s3_fire && !s3_flush
  io.bpu_to_ftq.resp.bits.s3.hasRedirect := s3_redirect
  io.bpu_to_ftq.resp.bits.s3.ftq_idx := s3_ftq_idx

  val redirect = io.ftq_to_bpu.redirect.bits

  predictors.io.update := io.ftq_to_bpu.update
  predictors.io.update.bits.ghist := getHist(io.ftq_to_bpu.update.bits.histPtr)
  predictors.io.redirect := io.ftq_to_bpu.redirect

  // Redirect logic
  val shift = redirect.cfiUpdate.shift
  val addIntoHist = redirect.cfiUpdate.addIntoHist
  // TODO: remove these below
  val shouldShiftVec = Mux(shift === 0.U, VecInit(0.U((1 << (log2Ceil(numBr) + 1)).W).asBools), VecInit((LowerMask(1.U << (shift-1.U))).asBools()))
  // TODO end

  val isBr = redirect.cfiUpdate.pd.isBr
  val taken = redirect.cfiUpdate.taken
  val real_br_taken_mask = (0 until numBr).map(i => shift === (i+1).U && taken && addIntoHist)

  val oldPtr = redirect.cfiUpdate.histPtr
  val oldFh = redirect.cfiUpdate.folded_hist
  val updated_ptr = oldPtr - shift
  val updated_fh = VecInit((0 to numBr).map(i => oldFh.update(ghv, oldPtr, i, taken && addIntoHist)))(shift)
  val redirect_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b => oldPtr.value === (n.U(log2Ceil(HistoryLength).W) + b.U) && shouldShiftVec(b) && io.ftq_to_bpu.redirect.valid))
  val redirect_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b => oldPtr.value === (n.U(log2Ceil(HistoryLength).W) + b.U) && shouldShiftVec(b)),
      real_br_taken_mask
    )
  )

  if (EnableGHistDiff) {
    val updated_ghist = WireInit(getHist(updated_ptr).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when (shift >= (i+1).U) {
        updated_ghist(i) := taken && addIntoHist && (i==0).B
      }
    }
    when(io.ftq_to_bpu.redirect.valid) {
      s0_ghist := updated_ghist.asUInt
    }
  }

  // val updatedGh = oldGh.update(shift, taken && addIntoHist)

  npcGen.register(io.ftq_to_bpu.redirect.valid, redirect.cfiUpdate.target, Some("redirect_target"), 2)
  foldedGhGen.register(io.ftq_to_bpu.redirect.valid, updated_fh, Some("redirect_FGHT"), 2)
  ghistPtrGen.register(io.ftq_to_bpu.redirect.valid, updated_ptr, Some("redirect_GHPtr"), 2)
  ghvBitWriteGens.zip(redirect_ghv_wens).zipWithIndex.map{case ((b, w), i) =>
    b.register(w.reduce(_||_), redirect_ghv_wdatas(i), Some(s"redirect_new_bit_$i"), 2)
  }
  // no need to assign s0_last_pred
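  // Worked example of redirect recovery (illustrative): for a mispredicted
  // conditional branch in the first slot, shift = 1 and (when the branch is
  // recorded into history) addIntoHist holds. The checkpointed pointer then
  // moves by one (updated_ptr = oldPtr - 1.U), the folded histories are
  // recomputed from the checkpointed oldFh with the corrected direction, and
  // exactly one ghv bit -- the one at index oldPtr.value -- is rewritten
  // with the resolved direction (taken && addIntoHist).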
  val need_reset = RegNext(reset.asBool) && !reset.asBool

  // Reset
  npcGen.register(need_reset, resetVector.U, Some("reset_pc"), 1)
  foldedGhGen.register(need_reset, 0.U.asTypeOf(s0_folded_gh), Some("reset_FGH"), 1)
  ghistPtrGen.register(need_reset, 0.U.asTypeOf(new CGHPtr), Some("reset_GHPtr"), 1)

  s0_pc := npcGen()
  s0_pc_reg := s0_pc
  s0_folded_gh := foldedGhGen()
  s0_ghist_ptr := ghistPtrGen()
  (ghv_write_datas zip ghvBitWriteGens).map{case (wd, d) => wd := d()}
  for (i <- 0 until HistoryLength) {
    ghv_wens(i) := Seq(s1_ghv_wens, s2_ghv_wens, s3_ghv_wens, redirect_ghv_wens).map(_(i).reduce(_||_)).reduce(_||_)
    when (ghv_wens(i)) {
      ghv(i) := ghv_write_datas(i)
    }
  }

  XSDebug(RegNext(reset.asBool) && !reset.asBool, "Resetting...\n")
  XSDebug(io.ftq_to_bpu.update.valid, p"Update from ftq\n")
  XSDebug(io.ftq_to_bpu.redirect.valid, p"Redirect from ftq\n")

  XSDebug("[BP0] fire=%d pc=%x\n", s0_fire, s0_pc)
  XSDebug("[BP1] v=%d r=%d cr=%d fire=%d flush=%d pc=%x\n",
    s1_valid, s1_ready, s1_components_ready, s1_fire, s1_flush, s1_pc)
  XSDebug("[BP2] v=%d r=%d cr=%d fire=%d redirect=%d flush=%d pc=%x\n",
    s2_valid, s2_ready, s2_components_ready, s2_fire, s2_redirect, s2_flush, s2_pc)
  XSDebug("[BP3] v=%d r=%d cr=%d fire=%d redirect=%d flush=%d pc=%x\n",
    s3_valid, s3_ready, s3_components_ready, s3_fire, s3_redirect, s3_flush, s3_pc)
  XSDebug("[FTQ] ready=%d\n", io.bpu_to_ftq.resp.ready)
  XSDebug("resp.s1.target=%x\n", resp.s1.getTarget)
  XSDebug("resp.s2.target=%x\n", resp.s2.getTarget)
  // XSDebug("s0_ghist: %b\n", s0_ghist.predHist)
  // XSDebug("s1_ghist: %b\n", s1_ghist.predHist)
  // XSDebug("s2_ghist: %b\n", s2_ghist.predHist)
  // XSDebug("s2_predicted_ghist: %b\n", s2_predicted_ghist.predHist)
  XSDebug(p"s0_ghist_ptr: $s0_ghist_ptr\n")
  XSDebug(p"s1_ghist_ptr: $s1_ghist_ptr\n")
  XSDebug(p"s2_ghist_ptr: $s2_ghist_ptr\n")
  XSDebug(p"s3_ghist_ptr: $s3_ghist_ptr\n")

  io.ftq_to_bpu.update.bits.display(io.ftq_to_bpu.update.valid)
  io.ftq_to_bpu.redirect.bits.display(io.ftq_to_bpu.redirect.valid)

  XSPerfAccumulate("s2_redirect", s2_redirect)
  XSPerfAccumulate("s3_redirect", s3_redirect)

  val perfEvents = predictors.asInstanceOf[Composer].getPerfEvents
  generatePerfEvent()
}