/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.frontend

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import utils._
import utility._

import scala.math.min
import xiangshan.backend.decode.ImmUnion

trait HasBPUConst extends HasXSParameter {
  val MaxMetaBaseLength = if (!env.FPGAPlatform) 512 else 247 // TODO: Reduce meta length
  val MaxMetaLength = if (HasHExtension) MaxMetaBaseLength + 4 else MaxMetaBaseLength
  val MaxBasicBlockSize = 32
  val LHistoryLength = 32
  // val numBr = 2
  val useBPD = true
  val useLHist = true
  val numBrSlot = numBr - 1
  val totalSlot = numBrSlot + 1

  val numDup = 4

  def BP_STAGES = (0 until 3).map(_.U(2.W))
  def BP_S1 = BP_STAGES(0)
  def BP_S2 = BP_STAGES(1)
  def BP_S3 = BP_STAGES(2)

  def dup_seq[T](src: T, num: Int = numDup) = Seq.tabulate(num)(n => src)
  def dup[T <: Data](src: T, num: Int = numDup) = VecInit(Seq.tabulate(num)(n => src))
  def dup_wire[T <: Data](src: T, num: Int = numDup) = Wire(Vec(num, src.cloneType))
  def dup_idx = Seq.tabulate(numDup)(n => n.toString())
  val numBpStages = BP_STAGES.length

  val debug = true
  // TODO: Replace log2Up by log2Ceil
}

trait HasBPUParameter extends HasXSParameter with HasBPUConst {
  val BPUDebug = true && !env.FPGAPlatform && env.EnablePerfDebug
  val EnableCFICommitLog = true
  val EnbaleCFIPredLog = true
  val EnableBPUTimeRecord = (EnableCFICommitLog || EnbaleCFIPredLog) && !env.FPGAPlatform
  val EnableCommit = false
}

class BPUCtrl(implicit p: Parameters) extends XSBundle {
  val ubtb_enable = Bool()
  val btb_enable  = Bool()
  val bim_enable  = Bool()
  val tage_enable = Bool()
  val sc_enable   = Bool()
  val ras_enable  = Bool()
  val loop_enable = Bool()
}

trait BPUUtils extends HasXSParameter {
  // circular shifting
  def circularShiftLeft(source: UInt, len: Int, shamt: UInt): UInt = {
    val res = Wire(UInt(len.W))
    val higher = source << shamt
    val lower = source >> (len.U - shamt)
    res := higher | lower
    res
  }

  def circularShiftRight(source: UInt, len: Int, shamt: UInt): UInt = {
    val res = Wire(UInt(len.W))
    val higher = source << (len.U - shamt)
    val lower = source >> shamt
    res := higher | lower
    res
  }

  // To be verified
  def satUpdate(old: UInt, len: Int, taken: Bool): UInt = {
    val oldSatTaken = old === ((1 << len) - 1).U
    val oldSatNotTaken = old === 0.U
    Mux(oldSatTaken && taken, ((1 << len) - 1).U,
      Mux(oldSatNotTaken && !taken, 0.U,
        Mux(taken, old + 1.U, old - 1.U)))
  }
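  // Worked example: with len = 2 this is a standard 2-bit saturating counter.
  //   satUpdate(3.U, 2, taken = true.B)  => 3.U  (already saturated high)
  //   satUpdate(0.U, 2, taken = false.B) => 0.U  (already saturated low)
  //   satUpdate(1.U, 2, taken = true.B)  => 2.U  (increment)
  //   satUpdate(2.U, 2, taken = false.B) => 1.U  (decrement)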
  def signedSatUpdate(old: SInt, len: Int, taken: Bool): SInt = {
    val oldSatTaken = old === ((1 << (len - 1)) - 1).S
    val oldSatNotTaken = old === (-(1 << (len - 1))).S
    Mux(oldSatTaken && taken, ((1 << (len - 1)) - 1).S,
      Mux(oldSatNotTaken && !taken, (-(1 << (len - 1))).S,
        Mux(taken, old + 1.S, old - 1.S)))
  }

  def getFallThroughAddr(start: UInt, carry: Bool, pft: UInt) = {
    val higher = start.head(VAddrBits - log2Ceil(PredictWidth) - instOffsetBits)
    Cat(Mux(carry, higher + 1.U, higher), pft, 0.U(instOffsetBits.W))
  }

  def foldTag(tag: UInt, l: Int): UInt = {
    val nChunks = (tag.getWidth + l - 1) / l
    val chunks = (0 until nChunks).map { i =>
      tag(min((i + 1) * l, tag.getWidth) - 1, i * l)
    }
    ParallelXOR(chunks)
  }
}

class BasePredictorInput(implicit p: Parameters) extends XSBundle with HasBPUConst {
  def nInputs = 1

  val s0_pc = Vec(numDup, UInt(VAddrBits.W))

  val folded_hist = Vec(numDup, new AllFoldedHistories(foldedGHistInfos))
  val s1_folded_hist = Vec(numDup, new AllFoldedHistories(foldedGHistInfos))
  val ghist = UInt(HistoryLength.W)

  val resp_in = Vec(nInputs, new BranchPredictionResp)

  // val final_preds = Vec(numBpStages, new)
  // val toFtq_fire = Bool()

  // val s0_all_ready = Bool()
}

class BasePredictorOutput(implicit p: Parameters) extends BranchPredictionResp {}

class BasePredictorIO(implicit p: Parameters) extends XSBundle with HasBPUConst {
  val reset_vector = Input(UInt(PAddrBits.W))
  val in = Flipped(DecoupledIO(new BasePredictorInput)) // TODO: Remove DecoupledIO
  // val out = DecoupledIO(new BasePredictorOutput)
  val out = Output(new BasePredictorOutput)
  // val flush_out = Valid(UInt(VAddrBits.W))

  val fauftb_entry_in = Input(new FTBEntry)
  val fauftb_entry_hit_in = Input(Bool())
  val fauftb_entry_out = Output(new FTBEntry)
  val fauftb_entry_hit_out = Output(Bool())

  val ctrl = Input(new BPUCtrl)

  val s0_fire = Input(Vec(numDup, Bool()))
  val s1_fire = Input(Vec(numDup, Bool()))
  val s2_fire = Input(Vec(numDup, Bool()))
  val s3_fire = Input(Vec(numDup, Bool()))

  val s2_redirect = Input(Vec(numDup, Bool()))
  val s3_redirect = Input(Vec(numDup, Bool()))

  val s1_ready = Output(Bool())
  val s2_ready = Output(Bool())
  val s3_ready = Output(Bool())

  val update = Flipped(Valid(new BranchPredictionUpdate))
  val redirect = Flipped(Valid(new BranchPredictionRedirect))
  val redirectFromIFU = Input(Bool())
}

abstract class BasePredictor(implicit p: Parameters) extends XSModule
  with HasBPUConst with BPUUtils with HasPerfEvents {
  val meta_size = 0
  val spec_meta_size = 0
  val is_fast_pred = false
  val io = IO(new BasePredictorIO())

  io.out := io.in.bits.resp_in(0)

  io.fauftb_entry_out := io.fauftb_entry_in
  io.fauftb_entry_hit_out := io.fauftb_entry_hit_in

  io.out.last_stage_meta := 0.U

  io.in.ready := !io.redirect.valid

  io.s1_ready := true.B
  io.s2_ready := true.B
  io.s3_ready := true.B

  val reset_vector = DelayN(io.reset_vector, 5)

  val s0_pc_dup = WireInit(io.in.bits.s0_pc) // fetchIdx(io.f0_pc)
  val s1_pc_dup = s0_pc_dup.zip(io.s0_fire).map { case (s0_pc, s0_fire) => RegEnable(s0_pc, s0_fire) }
  val s2_pc_dup = s1_pc_dup.zip(io.s1_fire).map { case (s1_pc, s1_fire) => RegEnable(s1_pc, s1_fire) }
  val s3_pc_dup = s2_pc_dup.zip(io.s2_fire).map { case (s2_pc, s2_fire) => RegEnable(s2_pc, s2_fire) }

  when (RegNext(RegNext(reset.asBool) && !reset.asBool)) {
    s1_pc_dup.map { case s1_pc => s1_pc := reset_vector }
  }
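  // The defaults above make a BasePredictor a pure pass-through: the previous
  // predictor's response and the FauFTB entry are forwarded unchanged, and
  // concrete predictors override only the fields they actually produce. The
  // per-stage PCs below are likewise forwarded so each stage's response
  // carries the PC it predicts for. The reset-vector injection just above
  // seeds s1_pc shortly after reset deasserts, so the very first prediction
  // starts from the boot address.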
  io.out.s1.pc := s1_pc_dup
  io.out.s2.pc := s2_pc_dup
  io.out.s3.pc := s3_pc_dup

  val perfEvents: Seq[(String, UInt)] = Seq()

  def getFoldedHistoryInfo: Option[Set[FoldedHistoryInfo]] = None
}

class FakePredictor(implicit p: Parameters) extends BasePredictor {
  io.in.ready := true.B
  io.out.last_stage_meta := 0.U
  io.out := io.in.bits.resp_in(0)
}

class BpuToFtqIO(implicit p: Parameters) extends XSBundle {
  val resp = DecoupledIO(new BpuToFtqBundle())
}

class PredictorIO(implicit p: Parameters) extends XSBundle {
  val bpu_to_ftq = new BpuToFtqIO()
  val ftq_to_bpu = Flipped(new FtqToBpuIO)
  val ctrl = Input(new BPUCtrl)
  val reset_vector = Input(UInt(PAddrBits.W))
}

class Predictor(implicit p: Parameters) extends XSModule with HasBPUConst with HasPerfEvents with HasCircularQueuePtrHelper {
  val io = IO(new PredictorIO)

  val ctrl = DelayN(io.ctrl, 1)
  val predictors = Module(if (useBPD) new Composer else new FakePredictor)

  def numOfStage = 3
  require(numOfStage > 1, "BPU numOfStage must be greater than 1")
  val topdown_stages = RegInit(VecInit(Seq.fill(numOfStage)(0.U.asTypeOf(new FrontendTopDownBundle))))

  // the following bubbles can only happen at s1
  val controlRedirectBubble = Wire(Bool())
  val ControlBTBMissBubble = Wire(Bool())
  val TAGEMissBubble = Wire(Bool())
  val SCMissBubble = Wire(Bool())
  val ITTAGEMissBubble = Wire(Bool())
  val RASMissBubble = Wire(Bool())

  val memVioRedirectBubble = Wire(Bool())
  val otherRedirectBubble = Wire(Bool())
  val btbMissBubble = Wire(Bool())
  otherRedirectBubble := false.B
  memVioRedirectBubble := false.B

  // an override can happen between s1-s2 and s2-s3
  val overrideBubble = Wire(Vec(numOfStage - 1, Bool()))
  def overrideStage = 1
  // an ftq update block can happen at s1, s2 and s3
  val ftqUpdateBubble = Wire(Vec(numOfStage, Bool()))
  def ftqUpdateStage = 0
  // an ftq full stall only happens at s3 (the last stage)
  val ftqFullStall = Wire(Bool())

  // by default, no bubble event
  topdown_stages(0) := 0.U.asTypeOf(new FrontendTopDownBundle)
  // event movement driven by clock only
  for (i <- 0 until numOfStage - 1) {
    topdown_stages(i + 1) := topdown_stages(i)
  }

  // ctrl signal
  predictors.io.ctrl := ctrl
  predictors.io.reset_vector := io.reset_vector

  val reset_vector = DelayN(io.reset_vector, 5)

  val s0_stall_dup = dup_wire(Bool()) // s0 stalls when no stage supplies a new PC, usually because the FTQ is full
  val s0_fire_dup, s1_fire_dup, s2_fire_dup, s3_fire_dup = dup_wire(Bool())
  val s1_valid_dup, s2_valid_dup, s3_valid_dup = dup_seq(RegInit(false.B))
  val s1_ready_dup, s2_ready_dup, s3_ready_dup = dup_wire(Bool())
  val s1_components_ready_dup, s2_components_ready_dup, s3_components_ready_dup = dup_wire(Bool())

  val s0_pc_dup = dup(WireInit(0.U.asTypeOf(UInt(VAddrBits.W))))
  val s0_pc_reg_dup = s0_pc_dup.zip(s0_stall_dup).map { case (s0_pc, s0_stall) => RegEnable(s0_pc, !s0_stall) }
  when (RegNext(RegNext(reset.asBool) && !reset.asBool)) {
    s0_pc_reg_dup.map { case s0_pc => s0_pc := reset_vector }
  }
  val s1_pc = RegEnable(s0_pc_dup(0), s0_fire_dup(0))
  val s2_pc = RegEnable(s1_pc, s1_fire_dup(0))
  val s3_pc = RegEnable(s2_pc, s2_fire_dup(0))
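  // Folded global history: each predictor table indexes and tags with the
  // global history XOR-folded down to its own width, so AllFoldedHistories
  // keeps one incrementally-maintained folded copy per (histLen, compLen)
  // configuration instead of re-folding the full history register every cycle.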
  val s0_folded_gh_dup = dup_wire(new AllFoldedHistories(foldedGHistInfos))
  val s0_folded_gh_reg_dup = s0_folded_gh_dup.zip(s0_stall_dup).map {
    case (x, s0_stall) => RegEnable(x, 0.U.asTypeOf(s0_folded_gh_dup(0)), !s0_stall)
  }
  val s1_folded_gh_dup = RegEnable(s0_folded_gh_dup, 0.U.asTypeOf(s0_folded_gh_dup), s0_fire_dup(1))
  val s2_folded_gh_dup = RegEnable(s1_folded_gh_dup, 0.U.asTypeOf(s0_folded_gh_dup), s1_fire_dup(1))
  val s3_folded_gh_dup = RegEnable(s2_folded_gh_dup, 0.U.asTypeOf(s0_folded_gh_dup), s2_fire_dup(1))

  val s0_last_br_num_oh_dup = dup_wire(UInt((numBr + 1).W))
  val s0_last_br_num_oh_reg_dup = s0_last_br_num_oh_dup.zip(s0_stall_dup).map {
    case (x, s0_stall) => RegEnable(x, 0.U, !s0_stall)
  }
  val s1_last_br_num_oh_dup = RegEnable(s0_last_br_num_oh_dup, 0.U.asTypeOf(s0_last_br_num_oh_dup), s0_fire_dup(1))
  val s2_last_br_num_oh_dup = RegEnable(s1_last_br_num_oh_dup, 0.U.asTypeOf(s0_last_br_num_oh_dup), s1_fire_dup(1))
  val s3_last_br_num_oh_dup = RegEnable(s2_last_br_num_oh_dup, 0.U.asTypeOf(s0_last_br_num_oh_dup), s2_fire_dup(1))

  val s0_ahead_fh_oldest_bits_dup = dup_wire(new AllAheadFoldedHistoryOldestBits(foldedGHistInfos))
  val s0_ahead_fh_oldest_bits_reg_dup = s0_ahead_fh_oldest_bits_dup.zip(s0_stall_dup).map {
    case (x, s0_stall) => RegEnable(x, 0.U.asTypeOf(s0_ahead_fh_oldest_bits_dup(0)), !s0_stall)
  }
  val s1_ahead_fh_oldest_bits_dup = RegEnable(s0_ahead_fh_oldest_bits_dup, 0.U.asTypeOf(s0_ahead_fh_oldest_bits_dup), s0_fire_dup(1))
  val s2_ahead_fh_oldest_bits_dup = RegEnable(s1_ahead_fh_oldest_bits_dup, 0.U.asTypeOf(s0_ahead_fh_oldest_bits_dup), s1_fire_dup(1))
  val s3_ahead_fh_oldest_bits_dup = RegEnable(s2_ahead_fh_oldest_bits_dup, 0.U.asTypeOf(s0_ahead_fh_oldest_bits_dup), s2_fire_dup(1))

  val npcGen_dup = Seq.tabulate(numDup)(n => new PhyPriorityMuxGenerator[UInt])
  val foldedGhGen_dup = Seq.tabulate(numDup)(n => new PhyPriorityMuxGenerator[AllFoldedHistories])
  val ghistPtrGen_dup = Seq.tabulate(numDup)(n => new PhyPriorityMuxGenerator[CGHPtr])
  val lastBrNumOHGen_dup = Seq.tabulate(numDup)(n => new PhyPriorityMuxGenerator[UInt])
  val aheadFhObGen_dup = Seq.tabulate(numDup)(n => new PhyPriorityMuxGenerator[AllAheadFoldedHistoryOldestBits])

  val ghvBitWriteGens = Seq.tabulate(HistoryLength)(n => new PhyPriorityMuxGenerator[Bool])
  // val ghistGen = new PhyPriorityMuxGenerator[UInt]

  val ghv = RegInit(0.U.asTypeOf(Vec(HistoryLength, Bool())))
  val ghv_wire = WireInit(ghv)

  val s0_ghist = WireInit(0.U.asTypeOf(UInt(HistoryLength.W)))

  println(f"history buffer length ${HistoryLength}")
  val ghv_write_datas = Wire(Vec(HistoryLength, Bool()))
  val ghv_wens = Wire(Vec(HistoryLength, Bool()))

  val s0_ghist_ptr_dup = dup_wire(new CGHPtr)
  val s0_ghist_ptr_reg_dup = s0_ghist_ptr_dup.zip(s0_stall_dup).map {
    case (x, s0_stall) => RegEnable(x, 0.U.asTypeOf(new CGHPtr), !s0_stall)
  }
  val s1_ghist_ptr_dup = RegEnable(s0_ghist_ptr_dup, 0.U.asTypeOf(s0_ghist_ptr_dup), s0_fire_dup(1))
  val s2_ghist_ptr_dup = RegEnable(s1_ghist_ptr_dup, 0.U.asTypeOf(s0_ghist_ptr_dup), s1_fire_dup(1))
  val s3_ghist_ptr_dup = RegEnable(s2_ghist_ptr_dup, 0.U.asTypeOf(s0_ghist_ptr_dup), s2_fire_dup(1))

  def getHist(ptr: CGHPtr): UInt = (Cat(ghv_wire.asUInt, ghv_wire.asUInt) >> (ptr.value + 1.U))(HistoryLength - 1, 0)
  s0_ghist := getHist(s0_ghist_ptr_dup(0))
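  // getHist reads the history ring buffer circularly: duplicating ghv before
  // the shift lets a single logical shift handle pointer wrap-around. E.g.
  // with HistoryLength = 8 and ptr.value = 2, the window starts at bit 3 and
  // wraps, yielding (MSB to LSB) ghv(2), ghv(1), ghv(0), ghv(7), ..., ghv(3).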
  val resp = predictors.io.out

  val toFtq_fire = io.bpu_to_ftq.resp.valid && io.bpu_to_ftq.resp.ready

  val s1_flush_dup, s2_flush_dup, s3_flush_dup = dup_wire(Bool())
  val s2_redirect_dup, s3_redirect_dup = dup_wire(Bool())

  // predictors.io := DontCare
  predictors.io.in.valid := s0_fire_dup(0)
  predictors.io.in.bits.s0_pc := s0_pc_dup
  predictors.io.in.bits.ghist := s0_ghist
  predictors.io.in.bits.folded_hist := s0_folded_gh_dup
  predictors.io.in.bits.s1_folded_hist := s1_folded_gh_dup
  predictors.io.in.bits.resp_in(0) := 0.U.asTypeOf(new BranchPredictionResp)
  predictors.io.fauftb_entry_in := 0.U.asTypeOf(new FTBEntry)
  predictors.io.fauftb_entry_hit_in := false.B
  predictors.io.redirectFromIFU := RegNext(io.ftq_to_bpu.redirctFromIFU, init = false.B)
  // predictors.io.in.bits.resp_in(0).s1.pc := s0_pc
  // predictors.io.in.bits.toFtq_fire := toFtq_fire

  // predictors.io.out.ready := io.bpu_to_ftq.resp.ready

  val redirect_req = io.ftq_to_bpu.redirect
  val do_redirect_dup = dup_seq(RegNextWithEnable(redirect_req))

  // Pipeline logic
  s2_redirect_dup.map(_ := false.B)
  s3_redirect_dup.map(_ := false.B)

  s3_flush_dup.map(_ := redirect_req.valid) // flush when a redirect arrives
  for (((s2_flush, s3_flush), s3_redirect) <- s2_flush_dup zip s3_flush_dup zip s3_redirect_dup)
    s2_flush := s3_flush || s3_redirect
  for (((s1_flush, s2_flush), s2_redirect) <- s1_flush_dup zip s2_flush_dup zip s2_redirect_dup)
    s1_flush := s2_flush || s2_redirect

  s1_components_ready_dup.map(_ := predictors.io.s1_ready)
  for (((s1_ready, s1_fire), s1_valid) <- s1_ready_dup zip s1_fire_dup zip s1_valid_dup)
    s1_ready := s1_fire || !s1_valid
  for (((s0_fire, s1_components_ready), s1_ready) <- s0_fire_dup zip s1_components_ready_dup zip s1_ready_dup)
    s0_fire := s1_components_ready && s1_ready
  predictors.io.s0_fire := s0_fire_dup

  s2_components_ready_dup.map(_ := predictors.io.s2_ready)
  for (((s2_ready, s2_fire), s2_valid) <- s2_ready_dup zip s2_fire_dup zip s2_valid_dup)
    s2_ready := s2_fire || !s2_valid
  for ((((s1_fire, s2_components_ready), s2_ready), s1_valid) <- s1_fire_dup zip s2_components_ready_dup zip s2_ready_dup zip s1_valid_dup)
    s1_fire := s1_valid && s2_components_ready && s2_ready && io.bpu_to_ftq.resp.ready

  s3_components_ready_dup.map(_ := predictors.io.s3_ready)
  for (((s3_ready, s3_fire), s3_valid) <- s3_ready_dup zip s3_fire_dup zip s3_valid_dup)
    s3_ready := s3_fire || !s3_valid
  for ((((s2_fire, s3_components_ready), s3_ready), s2_valid) <- s2_fire_dup zip s3_components_ready_dup zip s3_ready_dup zip s2_valid_dup)
    s2_fire := s2_valid && s3_components_ready && s3_ready

  for ((((s0_fire, s1_flush), s1_fire), s1_valid) <- s0_fire_dup zip s1_flush_dup zip s1_fire_dup zip s1_valid_dup) {
    when (redirect_req.valid) { s1_valid := false.B }
      .elsewhen (s0_fire)     { s1_valid := true.B }
      .elsewhen (s1_flush)    { s1_valid := false.B }
      .elsewhen (s1_fire)     { s1_valid := false.B }
  }
  predictors.io.s1_fire := s1_fire_dup
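  // Handshake recap for the logic above: a stage is "ready" when it is empty
  // or draining this cycle; s0 fires once s1 (both the shared pipeline regs
  // and every predictor component) can accept, and s1 additionally waits for
  // FTQ room, since each s1 prediction allocates an FTQ entry.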
  s2_fire_dup := s2_valid_dup

  for (((((s1_fire, s2_flush), s2_fire), s2_valid), s1_flush) <-
    s1_fire_dup zip s2_flush_dup zip s2_fire_dup zip s2_valid_dup zip s1_flush_dup) {

    when (s2_flush)       { s2_valid := false.B }
      .elsewhen (s1_fire) { s2_valid := !s1_flush }
      .elsewhen (s2_fire) { s2_valid := false.B }
  }

  predictors.io.s2_fire := s2_fire_dup
  predictors.io.s2_redirect := s2_redirect_dup

  s3_fire_dup := s3_valid_dup

  for (((((s2_fire, s3_flush), s3_fire), s3_valid), s2_flush) <-
    s2_fire_dup zip s3_flush_dup zip s3_fire_dup zip s3_valid_dup zip s2_flush_dup) {

    when (s3_flush)       { s3_valid := false.B }
      .elsewhen (s2_fire) { s3_valid := !s2_flush }
      .elsewhen (s3_fire) { s3_valid := false.B }
  }

  predictors.io.s3_fire := s3_fire_dup
  predictors.io.s3_redirect := s3_redirect_dup

  io.bpu_to_ftq.resp.valid :=
    s1_valid_dup(2) && s2_components_ready_dup(2) && s2_ready_dup(2) ||
    s2_fire_dup(2) && s2_redirect_dup(2) ||
    s3_fire_dup(2) && s3_redirect_dup(2)
  io.bpu_to_ftq.resp.bits := predictors.io.out
  io.bpu_to_ftq.resp.bits.last_stage_spec_info.histPtr := s3_ghist_ptr_dup(2)

  val full_pred_diff = WireInit(false.B)
  val full_pred_diff_stage = WireInit(0.U)
  val full_pred_diff_offset = WireInit(0.U)
  for (i <- 0 until numDup - 1) {
    when (io.bpu_to_ftq.resp.valid &&
      ((io.bpu_to_ftq.resp.bits.s1.full_pred(i).asTypeOf(UInt()) =/= io.bpu_to_ftq.resp.bits.s1.full_pred(i + 1).asTypeOf(UInt()) && io.bpu_to_ftq.resp.bits.s1.full_pred(i).hit) ||
       (io.bpu_to_ftq.resp.bits.s2.full_pred(i).asTypeOf(UInt()) =/= io.bpu_to_ftq.resp.bits.s2.full_pred(i + 1).asTypeOf(UInt()) && io.bpu_to_ftq.resp.bits.s2.full_pred(i).hit) ||
       (io.bpu_to_ftq.resp.bits.s3.full_pred(i).asTypeOf(UInt()) =/= io.bpu_to_ftq.resp.bits.s3.full_pred(i + 1).asTypeOf(UInt()) && io.bpu_to_ftq.resp.bits.s3.full_pred(i).hit))) {
      full_pred_diff := true.B
      full_pred_diff_offset := i.U
      when (io.bpu_to_ftq.resp.bits.s1.full_pred(i).asTypeOf(UInt()) =/= io.bpu_to_ftq.resp.bits.s1.full_pred(i + 1).asTypeOf(UInt())) {
        full_pred_diff_stage := 1.U
      } .elsewhen (io.bpu_to_ftq.resp.bits.s2.full_pred(i).asTypeOf(UInt()) =/= io.bpu_to_ftq.resp.bits.s2.full_pred(i + 1).asTypeOf(UInt())) {
        full_pred_diff_stage := 2.U
      } .otherwise {
        full_pred_diff_stage := 3.U
      }
    }
  }
  XSError(full_pred_diff, "Full prediction difference detected!")

  // s0_stall should be exclusive with any other PC source
  s0_stall_dup.zip(s1_valid_dup).zip(s2_redirect_dup).zip(s3_redirect_dup).zip(do_redirect_dup).foreach {
    case ((((s0_stall, s1_valid), s2_redirect), s3_redirect), do_redirect) => {
      s0_stall := !(s1_valid || s2_redirect || s3_redirect || do_redirect.valid)
    }
  }
  XSError(s0_stall_dup(0) && s0_pc_dup(0) =/= s0_pc_reg_dup(0), "s0_stall but s0_pc is different from s0_pc_reg")

  npcGen_dup.zip(s0_pc_reg_dup).map { case (gen, reg) =>
    gen.register(true.B, reg, Some("stallPC"), 0) }
  foldedGhGen_dup.zip(s0_folded_gh_reg_dup).map { case (gen, reg) =>
    gen.register(true.B, reg, Some("stallFGH"), 0) }
  ghistPtrGen_dup.zip(s0_ghist_ptr_reg_dup).map { case (gen, reg) =>
    gen.register(true.B, reg, Some("stallGHPtr"), 0) }
  lastBrNumOHGen_dup.zip(s0_last_br_num_oh_reg_dup).map { case (gen, reg) =>
    gen.register(true.B, reg, Some("stallBrNumOH"), 0) }
  aheadFhObGen_dup.zip(s0_ahead_fh_oldest_bits_reg_dup).map { case (gen, reg) =>
    gen.register(true.B, reg, Some("stallAFHOB"), 0) }

  // record the prediction cycle for profiling
  io.bpu_to_ftq.resp.bits.s1.full_pred.map(_.predCycle.map(_ := GTimer()))
  io.bpu_to_ftq.resp.bits.s2.full_pred.map(_.predCycle.map(_ := GTimer()))
  io.bpu_to_ftq.resp.bits.s3.full_pred.map(_.predCycle.map(_ := GTimer()))

  // History management
  // s1
  val s1_possible_predicted_ghist_ptrs_dup = s1_ghist_ptr_dup.map(ptr => (0 to numBr).map(ptr - _.U))
  val s1_predicted_ghist_ptr_dup = s1_possible_predicted_ghist_ptrs_dup.zip(resp.s1.lastBrPosOH).map { case (ptr, oh) => Mux1H(oh, ptr) }
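  // lastBrPosOH is one-hot over "no branch taken, branch slot 0, ..., slot
  // numBr-1"; since CGHPtr decreases as history bits are shifted in, the
  // candidate pointers are ptr - 0 .. ptr - numBr and Mux1H picks the one
  // matching how many history bits this prediction actually inserts.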
  val s1_possible_predicted_fhs_dup =
    for (((((fgh, afh), br_num_oh), t), br_pos_oh) <-
      s1_folded_gh_dup zip s1_ahead_fh_oldest_bits_dup zip s1_last_br_num_oh_dup zip resp.s1.brTaken zip resp.s1.lastBrPosOH)
      yield (0 to numBr).map(i =>
        fgh.update(afh, br_num_oh, i, t & br_pos_oh(i))
      )
  val s1_predicted_fh_dup = resp.s1.lastBrPosOH.zip(s1_possible_predicted_fhs_dup).map { case (oh, fh) => Mux1H(oh, fh) }

  val s1_ahead_fh_ob_src_dup = dup_wire(new AllAheadFoldedHistoryOldestBits(foldedGHistInfos))
  s1_ahead_fh_ob_src_dup.zip(s1_ghist_ptr_dup).map { case (src, ptr) => src.read(ghv, ptr) }

  if (EnableGHistDiff) {
    val s1_predicted_ghist = WireInit(getHist(s1_predicted_ghist_ptr_dup(0)).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when (resp.s1.shouldShiftVec(0)(i)) {
        s1_predicted_ghist(i) := resp.s1.brTaken(0) && (i == 0).B
      }
    }
    when (s1_valid_dup(0)) {
      s0_ghist := s1_predicted_ghist.asUInt
    }
  }

  val s1_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b => (s1_ghist_ptr_dup(0)).value === (CGHPtr(false.B, n.U) + b.U).value && resp.s1.shouldShiftVec(0)(b) && s1_valid_dup(0)))
  val s1_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b => (
        (s1_ghist_ptr_dup(0)).value === (CGHPtr(false.B, n.U) + b.U).value && resp.s1.shouldShiftVec(0)(b),
        resp.s1.brTaken(0) && resp.s1.lastBrPosOH(0)(b + 1)
      ))
    )
  )

  for (((npcGen, s1_valid), s1_target) <- npcGen_dup zip s1_valid_dup zip resp.s1.getTarget)
    npcGen.register(s1_valid, s1_target, Some("s1_target"), 4)
  for (((foldedGhGen, s1_valid), s1_predicted_fh) <- foldedGhGen_dup zip s1_valid_dup zip s1_predicted_fh_dup)
    foldedGhGen.register(s1_valid, s1_predicted_fh, Some("s1_FGH"), 4)
  for (((ghistPtrGen, s1_valid), s1_predicted_ghist_ptr) <- ghistPtrGen_dup zip s1_valid_dup zip s1_predicted_ghist_ptr_dup)
    ghistPtrGen.register(s1_valid, s1_predicted_ghist_ptr, Some("s1_GHPtr"), 4)
  for (((lastBrNumOHGen, s1_valid), s1_brPosOH) <- lastBrNumOHGen_dup zip s1_valid_dup zip resp.s1.lastBrPosOH.map(_.asUInt))
    lastBrNumOHGen.register(s1_valid, s1_brPosOH, Some("s1_BrNumOH"), 4)
  for (((aheadFhObGen, s1_valid), s1_ahead_fh_ob_src) <- aheadFhObGen_dup zip s1_valid_dup zip s1_ahead_fh_ob_src_dup)
    aheadFhObGen.register(s1_valid, s1_ahead_fh_ob_src, Some("s1_AFHOB"), 4)
  ghvBitWriteGens.zip(s1_ghv_wens).zipWithIndex.map { case ((b, w), i) =>
    b.register(w.reduce(_ || _), s1_ghv_wdatas(i), Some(s"s1_new_bit_$i"), 4)
  }

  class PreviousPredInfo extends Bundle {
    val hit = Vec(numDup, Bool())
    val target = Vec(numDup, UInt(VAddrBits.W))
    val lastBrPosOH = Vec(numDup, Vec(numBr + 1, Bool()))
    val taken = Vec(numDup, Bool())
    val takenMask = Vec(numDup, Vec(numBr, Bool()))
    val cfiIndex = Vec(numDup, UInt(log2Ceil(PredictWidth).W))
  }
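  // s1's externally visible prediction is snapshotted into PreviousPredInfo
  // at s1_fire and compared against s2's result one cycle later; any mismatch
  // in target, branch position, direction, or taken offset forces an s2
  // override (see preds_needs_redirect_vec_dup below).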
  def preds_needs_redirect_vec_dup(x: PreviousPredInfo, y: BranchPredictionBundle) = {
    // Timing optimization:
    // We first compare all targets against the previous stage's target, then
    // select the difference by taken & hit. Targets are usually generated
    // earlier than taken, so comparing before selecting helps timing.
    val targetDiffVec: IndexedSeq[Vec[Bool]] =
      x.target.zip(y.getAllTargets).map {
        case (xTarget, yAllTarget) => VecInit(yAllTarget.map(_ =/= xTarget))
      } // [numDup][all target comparisons]
    val targetDiff: IndexedSeq[Bool] =
      targetDiffVec.zip(x.hit).zip(x.takenMask).map {
        case ((diff, hit), takenMask) => selectByTaken(takenMask, hit, diff)
      } // [numDup]

    val lastBrPosOHDiff: IndexedSeq[Bool] = x.lastBrPosOH.zip(y.lastBrPosOH).map { case (oh1, oh2) => oh1.asUInt =/= oh2.asUInt }
    val takenDiff: IndexedSeq[Bool] = x.taken.zip(y.taken).map { case (t1, t2) => t1 =/= t2 }
    val takenOffsetDiff: IndexedSeq[Bool] = x.cfiIndex.zip(y.cfiIndex).zip(x.taken).zip(y.taken).map { case (((i1, i2), xt), yt) => xt && yt && i1 =/= i2.bits }
    VecInit(
      for ((((tgtd, lbpohd), tkd), tod) <-
        targetDiff zip lastBrPosOHDiff zip takenDiff zip takenOffsetDiff)
        yield VecInit(tgtd, lbpohd, tkd, tod)
      // x.shouldShiftVec.asUInt =/= y.shouldShiftVec.asUInt,
      // x.brTaken =/= y.brTaken
    )
  }

  // s2
  val s2_possible_predicted_ghist_ptrs_dup = s2_ghist_ptr_dup.map(ptr => (0 to numBr).map(ptr - _.U))
  val s2_predicted_ghist_ptr_dup = s2_possible_predicted_ghist_ptrs_dup.zip(resp.s2.lastBrPosOH).map { case (ptr, oh) => Mux1H(oh, ptr) }

  val s2_possible_predicted_fhs_dup =
    for ((((fgh, afh), br_num_oh), full_pred) <-
      s2_folded_gh_dup zip s2_ahead_fh_oldest_bits_dup zip s2_last_br_num_oh_dup zip resp.s2.full_pred)
      yield (0 to numBr).map(i =>
        fgh.update(afh, br_num_oh, i, if (i > 0) full_pred.br_taken_mask(i - 1) else false.B)
      )
  val s2_predicted_fh_dup = resp.s2.lastBrPosOH.zip(s2_possible_predicted_fhs_dup).map { case (oh, fh) => Mux1H(oh, fh) }

  val s2_ahead_fh_ob_src_dup = dup_wire(new AllAheadFoldedHistoryOldestBits(foldedGHistInfos))
  s2_ahead_fh_ob_src_dup.zip(s2_ghist_ptr_dup).map { case (src, ptr) => src.read(ghv, ptr) }

  if (EnableGHistDiff) {
    val s2_predicted_ghist = WireInit(getHist(s2_predicted_ghist_ptr_dup(0)).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when (resp.s2.shouldShiftVec(0)(i)) {
        s2_predicted_ghist(i) := resp.s2.brTaken(0) && (i == 0).B
      }
    }
    when (s2_redirect_dup(0)) {
      s0_ghist := s2_predicted_ghist.asUInt
    }
  }

  val s2_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b => (s2_ghist_ptr_dup(0)).value === (CGHPtr(false.B, n.U) + b.U).value && resp.s2.shouldShiftVec(0)(b) && s2_redirect_dup(0)))
  val s2_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b => (
        (s2_ghist_ptr_dup(0)).value === (CGHPtr(false.B, n.U) + b.U).value && resp.s2.shouldShiftVec(0)(b),
        resp.s2.full_pred(0).real_br_taken_mask()(b)
      ))
    )
  )

  val s1_pred_info = Wire(new PreviousPredInfo)
  s1_pred_info.hit := resp.s1.full_pred.map(_.hit)
  s1_pred_info.target := resp.s1.getTarget
  s1_pred_info.lastBrPosOH := resp.s1.lastBrPosOH
  s1_pred_info.taken := resp.s1.taken
  s1_pred_info.takenMask := resp.s1.full_pred.map(_.taken_mask_on_slot)
  s1_pred_info.cfiIndex := resp.s1.cfiIndex.map { case x => x.bits }

  val previous_s1_pred_info = RegEnable(s1_pred_info, 0.U.asTypeOf(new PreviousPredInfo), s1_fire_dup(0))

  val s2_redirect_s1_last_pred_vec_dup = preds_needs_redirect_vec_dup(previous_s1_pred_info, resp.s2)

  for (((s2_redirect, s2_fire), s2_redirect_s1_last_pred_vec) <- s2_redirect_dup zip s2_fire_dup zip s2_redirect_s1_last_pred_vec_dup)
    s2_redirect := s2_fire && s2_redirect_s1_last_pred_vec.reduce(_ || _)
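  // On an s2 override, every piece of speculative state (next PC, folded
  // histories, GHR pointer, ahead bits, GHV write data) is re-registered
  // below from s2's prediction, replacing what s1 had installed.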
  for (((npcGen, s2_redirect), s2_target) <- npcGen_dup zip s2_redirect_dup zip resp.s2.getTarget)
    npcGen.register(s2_redirect, s2_target, Some("s2_target"), 5)
  for (((foldedGhGen, s2_redirect), s2_predicted_fh) <- foldedGhGen_dup zip s2_redirect_dup zip s2_predicted_fh_dup)
    foldedGhGen.register(s2_redirect, s2_predicted_fh, Some("s2_FGH"), 5)
  for (((ghistPtrGen, s2_redirect), s2_predicted_ghist_ptr) <- ghistPtrGen_dup zip s2_redirect_dup zip s2_predicted_ghist_ptr_dup)
    ghistPtrGen.register(s2_redirect, s2_predicted_ghist_ptr, Some("s2_GHPtr"), 5)
  for (((lastBrNumOHGen, s2_redirect), s2_brPosOH) <- lastBrNumOHGen_dup zip s2_redirect_dup zip resp.s2.lastBrPosOH.map(_.asUInt))
    lastBrNumOHGen.register(s2_redirect, s2_brPosOH, Some("s2_BrNumOH"), 5)
  for (((aheadFhObGen, s2_redirect), s2_ahead_fh_ob_src) <- aheadFhObGen_dup zip s2_redirect_dup zip s2_ahead_fh_ob_src_dup)
    aheadFhObGen.register(s2_redirect, s2_ahead_fh_ob_src, Some("s2_AFHOB"), 5)
  ghvBitWriteGens.zip(s2_ghv_wens).zipWithIndex.map { case ((b, w), i) =>
    b.register(w.reduce(_ || _), s2_ghv_wdatas(i), Some(s"s2_new_bit_$i"), 5)
  }

  XSPerfAccumulate("s2_redirect_because_target_diff", s2_fire_dup(0) && s2_redirect_s1_last_pred_vec_dup(0)(0))
  XSPerfAccumulate("s2_redirect_because_branch_num_diff", s2_fire_dup(0) && s2_redirect_s1_last_pred_vec_dup(0)(1))
  XSPerfAccumulate("s2_redirect_because_direction_diff", s2_fire_dup(0) && s2_redirect_s1_last_pred_vec_dup(0)(2))
  XSPerfAccumulate("s2_redirect_because_cfi_idx_diff", s2_fire_dup(0) && s2_redirect_s1_last_pred_vec_dup(0)(3))
  // XSPerfAccumulate("s2_redirect_because_shouldShiftVec_diff", s2_fire && s2_redirect_s1_last_pred_vec(4))
  // XSPerfAccumulate("s2_redirect_because_brTaken_diff", s2_fire && s2_redirect_s1_last_pred_vec(5))
  XSPerfAccumulate("s2_redirect_because_fallThroughError", s2_fire_dup(0) && resp.s2.fallThruError(0))

  XSPerfAccumulate("s2_redirect_when_taken", s2_redirect_dup(0) && resp.s2.taken(0) && resp.s2.full_pred(0).hit)
  XSPerfAccumulate("s2_redirect_when_not_taken", s2_redirect_dup(0) && !resp.s2.taken(0) && resp.s2.full_pred(0).hit)
  XSPerfAccumulate("s2_redirect_when_not_hit", s2_redirect_dup(0) && !resp.s2.full_pred(0).hit)

  // s3
  val s3_possible_predicted_ghist_ptrs_dup = s3_ghist_ptr_dup.map(ptr => (0 to numBr).map(ptr - _.U))
  val s3_predicted_ghist_ptr_dup = s3_possible_predicted_ghist_ptrs_dup.zip(resp.s3.lastBrPosOH).map { case (ptr, oh) => Mux1H(oh, ptr) }

  val s3_possible_predicted_fhs_dup =
    for ((((fgh, afh), br_num_oh), full_pred) <-
      s3_folded_gh_dup zip s3_ahead_fh_oldest_bits_dup zip s3_last_br_num_oh_dup zip resp.s3.full_pred)
      yield (0 to numBr).map(i =>
        fgh.update(afh, br_num_oh, i, if (i > 0) full_pred.br_taken_mask(i - 1) else false.B)
      )
  val s3_predicted_fh_dup = resp.s3.lastBrPosOH.zip(s3_possible_predicted_fhs_dup).map { case (oh, fh) => Mux1H(oh, fh) }

  val s3_ahead_fh_ob_src_dup = dup_wire(new AllAheadFoldedHistoryOldestBits(foldedGHistInfos))
  s3_ahead_fh_ob_src_dup.zip(s3_ghist_ptr_dup).map { case (src, ptr) => src.read(ghv, ptr) }

  if (EnableGHistDiff) {
    val s3_predicted_ghist = WireInit(getHist(s3_predicted_ghist_ptr_dup(0)).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when (resp.s3.shouldShiftVec(0)(i)) {
        s3_predicted_ghist(i) := resp.s3.brTaken(0) && (i == 0).B
      }
    }
    when (s3_redirect_dup(0)) {
      s0_ghist := s3_predicted_ghist.asUInt
    }
  }
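  // Note the gating difference: s1 writes speculative GHV bits on every valid
  // prediction, while s2/s3 only rewrite them on an actual override, since
  // the bits already installed by s1 are otherwise still correct.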
  val s3_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b => (s3_ghist_ptr_dup(0)).value === (CGHPtr(false.B, n.U) + b.U).value && resp.s3.shouldShiftVec(0)(b) && s3_redirect_dup(0)))
  val s3_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b => (
        (s3_ghist_ptr_dup(0)).value === (CGHPtr(false.B, n.U) + b.U).value && resp.s3.shouldShiftVec(0)(b),
        resp.s3.full_pred(0).real_br_taken_mask()(b)
      ))
    )
  )

  val previous_s2_pred = RegEnable(resp.s2, 0.U.asTypeOf(resp.s2), s2_fire_dup(0))

  val s3_redirect_on_br_taken_dup = resp.s3.full_pred.zip(previous_s2_pred.full_pred).map { case (fp1, fp2) => fp1.real_br_taken_mask().asUInt =/= fp2.real_br_taken_mask().asUInt }
  val s3_both_first_taken_dup = resp.s3.full_pred.zip(previous_s2_pred.full_pred).map { case (fp1, fp2) => fp1.real_br_taken_mask()(0) && fp2.real_br_taken_mask()(0) }
  val s3_redirect_on_target_dup = resp.s3.getTarget.zip(previous_s2_pred.getTarget).map { case (t1, t2) => t1 =/= t2 }
  val s3_redirect_on_jalr_target_dup = resp.s3.full_pred.zip(previous_s2_pred.full_pred).map { case (fp1, fp2) => fp1.hit_taken_on_jalr && fp1.jalr_target =/= fp2.jalr_target }
  val s3_redirect_on_fall_thru_error_dup = resp.s3.fallThruError
  val s3_redirect_on_ftb_multi_hit_dup = resp.s3.ftbMultiHit

  for (((((((s3_redirect, s3_fire), s3_redirect_on_br_taken), s3_redirect_on_target), s3_redirect_on_fall_thru_error), s3_redirect_on_ftb_multi_hit), s3_both_first_taken) <-
    s3_redirect_dup zip s3_fire_dup zip s3_redirect_on_br_taken_dup zip s3_redirect_on_target_dup zip s3_redirect_on_fall_thru_error_dup zip s3_redirect_on_ftb_multi_hit_dup zip s3_both_first_taken_dup) {

    s3_redirect := s3_fire && (
      (s3_redirect_on_br_taken && !s3_both_first_taken) || s3_redirect_on_target || s3_redirect_on_fall_thru_error || s3_redirect_on_ftb_multi_hit
    )
  }

  XSPerfAccumulate("s3_redirect_on_br_taken", s3_fire_dup(0) && s3_redirect_on_br_taken_dup(0))
  XSPerfAccumulate("s3_redirect_on_jalr_target", s3_fire_dup(0) && s3_redirect_on_jalr_target_dup(0))
  XSPerfAccumulate("s3_redirect_on_others", s3_redirect_dup(0) && !(s3_redirect_on_br_taken_dup(0) || s3_redirect_on_jalr_target_dup(0)))

  for (((npcGen, s3_redirect), s3_target) <- npcGen_dup zip s3_redirect_dup zip resp.s3.getTarget)
    npcGen.register(s3_redirect, s3_target, Some("s3_target"), 3)
  for (((foldedGhGen, s3_redirect), s3_predicted_fh) <- foldedGhGen_dup zip s3_redirect_dup zip s3_predicted_fh_dup)
    foldedGhGen.register(s3_redirect, s3_predicted_fh, Some("s3_FGH"), 3)
  for (((ghistPtrGen, s3_redirect), s3_predicted_ghist_ptr) <- ghistPtrGen_dup zip s3_redirect_dup zip s3_predicted_ghist_ptr_dup)
    ghistPtrGen.register(s3_redirect, s3_predicted_ghist_ptr, Some("s3_GHPtr"), 3)
  for (((lastBrNumOHGen, s3_redirect), s3_brPosOH) <- lastBrNumOHGen_dup zip s3_redirect_dup zip resp.s3.lastBrPosOH.map(_.asUInt))
    lastBrNumOHGen.register(s3_redirect, s3_brPosOH, Some("s3_BrNumOH"), 3)
  for (((aheadFhObGen, s3_redirect), s3_ahead_fh_ob_src) <- aheadFhObGen_dup zip s3_redirect_dup zip s3_ahead_fh_ob_src_dup)
    aheadFhObGen.register(s3_redirect, s3_ahead_fh_ob_src, Some("s3_AFHOB"), 3)
  ghvBitWriteGens.zip(s3_ghv_wens).zipWithIndex.map { case ((b, w), i) =>
    b.register(w.reduce(_ || _), s3_ghv_wdatas(i), Some(s"s3_new_bit_$i"), 3)
  }

  // Tell the FTQ which in-flight predictions the later stages have overridden
  val s2_ftq_idx = RegEnable(io.ftq_to_bpu.enq_ptr, s1_fire_dup(0))
  val s3_ftq_idx = RegEnable(s2_ftq_idx, s2_fire_dup(0))

  for (((to_ftq_s1_valid, s1_fire), s1_flush) <- io.bpu_to_ftq.resp.bits.s1.valid zip s1_fire_dup zip s1_flush_dup) {
    to_ftq_s1_valid := s1_fire && !s1_flush
  }
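  // BPU-to-FTQ staging protocol: s1 carries the base prediction and never
  // redirects, so its hasRedirect stays false; s2/s3 mirror their override
  // signals into hasRedirect so the FTQ can squash and re-enqueue from the
  // recorded ftq_idx.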
  io.bpu_to_ftq.resp.bits.s1.hasRedirect.map(_ := false.B)
  io.bpu_to_ftq.resp.bits.s1.ftq_idx := DontCare
  for (((to_ftq_s2_valid, s2_fire), s2_flush) <- io.bpu_to_ftq.resp.bits.s2.valid zip s2_fire_dup zip s2_flush_dup) {
    to_ftq_s2_valid := s2_fire && !s2_flush
  }
  io.bpu_to_ftq.resp.bits.s2.hasRedirect.zip(s2_redirect_dup).map { case (hr, r) => hr := r }
  io.bpu_to_ftq.resp.bits.s2.ftq_idx := s2_ftq_idx
  for (((to_ftq_s3_valid, s3_fire), s3_flush) <- io.bpu_to_ftq.resp.bits.s3.valid zip s3_fire_dup zip s3_flush_dup) {
    to_ftq_s3_valid := s3_fire && !s3_flush
  }
  io.bpu_to_ftq.resp.bits.s3.hasRedirect.zip(s3_redirect_dup).map { case (hr, r) => hr := r }
  io.bpu_to_ftq.resp.bits.s3.ftq_idx := s3_ftq_idx

  predictors.io.update.valid := RegNext(io.ftq_to_bpu.update.valid, init = false.B)
  predictors.io.update.bits := RegEnable(io.ftq_to_bpu.update.bits, io.ftq_to_bpu.update.valid)
  predictors.io.update.bits.ghist := RegEnable(
    getHist(io.ftq_to_bpu.update.bits.spec_info.histPtr), io.ftq_to_bpu.update.valid)

  val redirect_dup = do_redirect_dup.map(_.bits)
  predictors.io.redirect := do_redirect_dup(0)

  // Redirect logic
  val shift_dup = redirect_dup.map(_.cfiUpdate.shift)
  val addIntoHist_dup = redirect_dup.map(_.cfiUpdate.addIntoHist)
  // TODO: remove these below
  val shouldShiftVec_dup = shift_dup.map(shift => Mux(shift === 0.U, VecInit(0.U((1 << (log2Ceil(numBr) + 1)).W).asBools), VecInit((LowerMask(1.U << (shift - 1.U))).asBools)))
  // TODO end
  val afhob_dup = redirect_dup.map(_.cfiUpdate.afhob)
  val lastBrNumOH_dup = redirect_dup.map(_.cfiUpdate.lastBrNumOH)

  val isBr_dup = redirect_dup.map(_.cfiUpdate.pd.isBr)
  val taken_dup = redirect_dup.map(_.cfiUpdate.taken)
  val real_br_taken_mask_dup =
    for (((shift, taken), addIntoHist) <- shift_dup zip taken_dup zip addIntoHist_dup)
      yield (0 until numBr).map(i => shift === (i + 1).U && taken && addIntoHist)

  val oldPtr_dup = redirect_dup.map(_.cfiUpdate.histPtr)
  val updated_ptr_dup = oldPtr_dup.zip(shift_dup).map { case (oldPtr, shift) => oldPtr - shift }
  def computeFoldedHist(hist: UInt, compLen: Int)(histLen: Int): UInt = {
    if (histLen > 0) {
      val nChunks = (histLen + compLen - 1) / compLen
      val hist_chunks = (0 until nChunks) map { i =>
        hist(min((i + 1) * compLen, histLen) - 1, i * compLen)
      }
      ParallelXOR(hist_chunks)
    }
    else 0.U
  }
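  // Worked example: computeFoldedHist(hist, compLen = 3)(histLen = 8) splits
  // the low 8 history bits into chunks hist(2,0), hist(5,3), hist(7,6) and
  // XORs them into a 3-bit folded value, matching the incremental folding
  // that AllFoldedHistories maintains.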
  val oldFh_dup = dup_seq(WireInit(0.U.asTypeOf(new AllFoldedHistories(foldedGHistInfos))))
  oldFh_dup.zip(oldPtr_dup).map { case (oldFh, oldPtr) =>
    foldedGHistInfos.foreach { case (histLen, compLen) =>
      oldFh.getHistWithInfo((histLen, compLen)).folded_hist := computeFoldedHist(getHist(oldPtr), compLen)(histLen)
    }
  }

  val updated_fh_dup =
    for (((((oldFh, oldPtr), taken), addIntoHist), shift) <-
      oldFh_dup zip oldPtr_dup zip taken_dup zip addIntoHist_dup zip shift_dup)
      yield VecInit((0 to numBr).map(i => oldFh.update(ghv, oldPtr, i, taken && addIntoHist)))(shift)
  val thisBrNumOH_dup = shift_dup.map(shift => UIntToOH(shift, numBr + 1))
  val thisAheadFhOb_dup = dup_wire(new AllAheadFoldedHistoryOldestBits(foldedGHistInfos))
  thisAheadFhOb_dup.zip(oldPtr_dup).map { case (afhob, oldPtr) => afhob.read(ghv, oldPtr) }
  val redirect_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b => oldPtr_dup(0).value === (CGHPtr(false.B, n.U) + b.U).value && shouldShiftVec_dup(0)(b) && do_redirect_dup(0).valid))
  val redirect_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b => oldPtr_dup(0).value === (CGHPtr(false.B, n.U) + b.U).value && shouldShiftVec_dup(0)(b)),
      real_br_taken_mask_dup(0)
    )
  )

  if (EnableGHistDiff) {
    val updated_ghist = WireInit(getHist(updated_ptr_dup(0)).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when (shift_dup(0) >= (i + 1).U) {
        updated_ghist(i) := taken_dup(0) && addIntoHist_dup(0) && (i == 0).B
      }
    }
    when (do_redirect_dup(0).valid) {
      s0_ghist := updated_ghist.asUInt
    }
  }

  // Commit-time history checker
  if (EnableCommitGHistDiff) {
    val commitGHist = RegInit(0.U.asTypeOf(Vec(HistoryLength, Bool())))
    val commitGHistPtr = RegInit(0.U.asTypeOf(new CGHPtr))
    def getCommitHist(ptr: CGHPtr): UInt =
      (Cat(commitGHist.asUInt, commitGHist.asUInt) >> (ptr.value + 1.U))(HistoryLength - 1, 0)

    val updateValid: Bool = io.ftq_to_bpu.update.valid
    val branchValidMask: UInt = io.ftq_to_bpu.update.bits.ftb_entry.brValids.asUInt
    val branchCommittedMask: Vec[Bool] = io.ftq_to_bpu.update.bits.br_committed
    val misPredictMask: UInt = io.ftq_to_bpu.update.bits.mispred_mask.asUInt
    val takenMask: UInt =
      io.ftq_to_bpu.update.bits.br_taken_mask.asUInt |
      io.ftq_to_bpu.update.bits.ftb_entry.always_taken.asUInt // an always-taken branch is still recorded in history
    val takenIdx: UInt = (PriorityEncoder(takenMask) + 1.U((log2Ceil(numBr) + 1).W)).asUInt
    val misPredictIdx: UInt = (PriorityEncoder(misPredictMask) + 1.U((log2Ceil(numBr) + 1).W)).asUInt
    val shouldShiftMask: UInt = Mux(takenMask.orR,
        LowerMask(takenIdx).asUInt,
        ((1 << numBr) - 1).asUInt) &
      Mux(misPredictMask.orR,
        LowerMask(misPredictIdx).asUInt,
        ((1 << numBr) - 1).asUInt) &
      branchCommittedMask.asUInt
    val updateShift: UInt =
      Mux(updateValid && branchValidMask.orR, PopCount(branchValidMask & shouldShiftMask), 0.U)

    // Maintain the commitGHist
    for (i <- 0 until numBr) {
      when (updateShift >= (i + 1).U) {
        val ptr: CGHPtr = commitGHistPtr - i.asUInt
        commitGHist(ptr.value) := takenMask(i)
      }
    }
    when (updateValid) {
      commitGHistPtr := commitGHistPtr - updateShift
    }

    // Reconstruct the commit-time folded history and diff it against the
    // predict-time folded history, table by table
    TageTableInfos.map {
      case (nRows, histLen, _) => {
        val nRowsPerBr = nRows / numBr
        val predictGHistPtr = io.ftq_to_bpu.update.bits.spec_info.histPtr
        val commitTrueHist: UInt = computeFoldedHist(getCommitHist(commitGHistPtr), log2Ceil(nRowsPerBr))(histLen)
        val predictFHist: UInt = computeFoldedHist(getHist(predictGHistPtr), log2Ceil(nRowsPerBr))(histLen)
        XSWarn(updateValid && predictFHist =/= commitTrueHist,
          p"predict time ghist: ${predictFHist} is different from commit time: ${commitTrueHist}\n")
      }
    }
  }
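  // Redirect recovery: when the FTQ reports a misprediction, the next PC and
  // every piece of speculative history state are restored from the redirect's
  // cfiUpdate payload, overriding any in-flight stage outputs.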
  // val updatedGh = oldGh.update(shift, taken && addIntoHist)
  for ((npcGen, do_redirect) <- npcGen_dup zip do_redirect_dup)
    npcGen.register(do_redirect.valid, do_redirect.bits.cfiUpdate.target, Some("redirect_target"), 2)
  for (((foldedGhGen, do_redirect), updated_fh) <- foldedGhGen_dup zip do_redirect_dup zip updated_fh_dup)
    foldedGhGen.register(do_redirect.valid, updated_fh, Some("redirect_FGHT"), 2)
  for (((ghistPtrGen, do_redirect), updated_ptr) <- ghistPtrGen_dup zip do_redirect_dup zip updated_ptr_dup)
    ghistPtrGen.register(do_redirect.valid, updated_ptr, Some("redirect_GHPtr"), 2)
  for (((lastBrNumOHGen, do_redirect), thisBrNumOH) <- lastBrNumOHGen_dup zip do_redirect_dup zip thisBrNumOH_dup)
    lastBrNumOHGen.register(do_redirect.valid, thisBrNumOH, Some("redirect_BrNumOH"), 2)
  for (((aheadFhObGen, do_redirect), thisAheadFhOb) <- aheadFhObGen_dup zip do_redirect_dup zip thisAheadFhOb_dup)
    aheadFhObGen.register(do_redirect.valid, thisAheadFhOb, Some("redirect_AFHOB"), 2)
  ghvBitWriteGens.zip(redirect_ghv_wens).zipWithIndex.map { case ((b, w), i) =>
    b.register(w.reduce(_ || _), redirect_ghv_wdatas(i), Some(s"redirect_new_bit_$i"), 2)
  }
  // no need to assign s0_last_pred

  // val need_reset = RegNext(reset.asBool) && !reset.asBool

  // Reset
  // npcGen.register(need_reset, resetVector.U, Some("reset_pc"), 1)
  // foldedGhGen.register(need_reset, 0.U.asTypeOf(s0_folded_gh), Some("reset_FGH"), 1)
  // ghistPtrGen.register(need_reset, 0.U.asTypeOf(new CGHPtr), Some("reset_GHPtr"), 1)

  s0_pc_dup.zip(npcGen_dup).map { case (s0_pc, npcGen) => s0_pc := npcGen() }
  s0_folded_gh_dup.zip(foldedGhGen_dup).map { case (s0_folded_gh, foldedGhGen) => s0_folded_gh := foldedGhGen() }
  s0_ghist_ptr_dup.zip(ghistPtrGen_dup).map { case (s0_ghist_ptr, ghistPtrGen) => s0_ghist_ptr := ghistPtrGen() }
  s0_ahead_fh_oldest_bits_dup.zip(aheadFhObGen_dup).map { case (s0_ahead_fh_oldest_bits, aheadFhObGen) =>
    s0_ahead_fh_oldest_bits := aheadFhObGen() }
  s0_last_br_num_oh_dup.zip(lastBrNumOHGen_dup).map { case (s0_last_br_num_oh, lastBrNumOHGen) =>
    s0_last_br_num_oh := lastBrNumOHGen() }
  (ghv_write_datas zip ghvBitWriteGens).map { case (wd, d) => wd := d() }
  for (i <- 0 until HistoryLength) {
    ghv_wens(i) := Seq(s1_ghv_wens, s2_ghv_wens, s3_ghv_wens, redirect_ghv_wens).map(_(i).reduce(_ || _)).reduce(_ || _)
    when (ghv_wens(i)) {
      ghv(i) := ghv_write_datas(i)
    }
  }

  // TODO: signals for memVio and other Redirects
  controlRedirectBubble := do_redirect_dup(0).valid && do_redirect_dup(0).bits.ControlRedirectBubble
  ControlBTBMissBubble := do_redirect_dup(0).bits.ControlBTBMissBubble
  TAGEMissBubble := do_redirect_dup(0).bits.TAGEMissBubble
  SCMissBubble := do_redirect_dup(0).bits.SCMissBubble
  ITTAGEMissBubble := do_redirect_dup(0).bits.ITTAGEMissBubble
  RASMissBubble := do_redirect_dup(0).bits.RASMissBubble

  memVioRedirectBubble := do_redirect_dup(0).valid && do_redirect_dup(0).bits.MemVioRedirectBubble
  otherRedirectBubble := do_redirect_dup(0).valid && do_redirect_dup(0).bits.OtherRedirectBubble
  btbMissBubble := do_redirect_dup(0).valid && do_redirect_dup(0).bits.BTBMissBubble
  overrideBubble(0) := s2_redirect_dup(0)
  overrideBubble(1) := s3_redirect_dup(0)
  ftqUpdateBubble(0) := !s1_components_ready_dup(0)
  ftqUpdateBubble(1) := !s2_components_ready_dup(0)
  ftqUpdateBubble(2) := !s3_components_ready_dup(0)
  ftqFullStall := !io.bpu_to_ftq.resp.ready
  io.bpu_to_ftq.resp.bits.topdown_info := topdown_stages(numOfStage - 1)
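  // Top-down accounting: when a bubble source is identified, the affected
  // in-flight stage entries are tagged with its reason, so downstream
  // top-down counters can attribute lost fetch slots to a specific cause.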
  // topdown handling logic here
  when (controlRedirectBubble) {
    /*
    for (i <- 0 until numOfStage)
      topdown_stages(i).reasons(TopDownCounters.ControlRedirectBubble.id) := true.B
    io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.ControlRedirectBubble.id) := true.B
    */
    when (ControlBTBMissBubble) {
      for (i <- 0 until numOfStage)
        topdown_stages(i).reasons(TopDownCounters.BTBMissBubble.id) := true.B
      io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.BTBMissBubble.id) := true.B
    } .elsewhen (TAGEMissBubble) {
      for (i <- 0 until numOfStage)
        topdown_stages(i).reasons(TopDownCounters.TAGEMissBubble.id) := true.B
      io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.TAGEMissBubble.id) := true.B
    } .elsewhen (SCMissBubble) {
      for (i <- 0 until numOfStage)
        topdown_stages(i).reasons(TopDownCounters.SCMissBubble.id) := true.B
      io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.SCMissBubble.id) := true.B
    } .elsewhen (ITTAGEMissBubble) {
      for (i <- 0 until numOfStage)
        topdown_stages(i).reasons(TopDownCounters.ITTAGEMissBubble.id) := true.B
      io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.ITTAGEMissBubble.id) := true.B
    } .elsewhen (RASMissBubble) {
      for (i <- 0 until numOfStage)
        topdown_stages(i).reasons(TopDownCounters.RASMissBubble.id) := true.B
      io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.RASMissBubble.id) := true.B
    }
  }
  when (memVioRedirectBubble) {
    for (i <- 0 until numOfStage)
      topdown_stages(i).reasons(TopDownCounters.MemVioRedirectBubble.id) := true.B
    io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.MemVioRedirectBubble.id) := true.B
  }
  when (otherRedirectBubble) {
    for (i <- 0 until numOfStage)
      topdown_stages(i).reasons(TopDownCounters.OtherRedirectBubble.id) := true.B
    io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.OtherRedirectBubble.id) := true.B
  }
  when (btbMissBubble) {
    for (i <- 0 until numOfStage)
      topdown_stages(i).reasons(TopDownCounters.BTBMissBubble.id) := true.B
    io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.BTBMissBubble.id) := true.B
  }

  for (i <- 0 until numOfStage) {
    if (i < numOfStage - overrideStage) {
      when (overrideBubble(i)) {
        for (j <- 0 to i)
          topdown_stages(j).reasons(TopDownCounters.OverrideBubble.id) := true.B
      }
    }
    if (i < numOfStage - ftqUpdateStage) {
      when (ftqUpdateBubble(i)) {
        topdown_stages(i).reasons(TopDownCounters.FtqUpdateBubble.id) := true.B
      }
    }
  }
  when (ftqFullStall) {
    topdown_stages(0).reasons(TopDownCounters.FtqFullStall.id) := true.B
  }

  XSError(isBefore(redirect_dup(0).cfiUpdate.histPtr, s3_ghist_ptr_dup(0)) && do_redirect_dup(0).valid,
    p"s3_ghist_ptr ${s3_ghist_ptr_dup(0)} exceeds redirect histPtr ${redirect_dup(0).cfiUpdate.histPtr}\n")
  XSError(isBefore(redirect_dup(0).cfiUpdate.histPtr, s2_ghist_ptr_dup(0)) && do_redirect_dup(0).valid,
    p"s2_ghist_ptr ${s2_ghist_ptr_dup(0)} exceeds redirect histPtr ${redirect_dup(0).cfiUpdate.histPtr}\n")
  XSError(isBefore(redirect_dup(0).cfiUpdate.histPtr, s1_ghist_ptr_dup(0)) && do_redirect_dup(0).valid,
    p"s1_ghist_ptr ${s1_ghist_ptr_dup(0)} exceeds redirect histPtr ${redirect_dup(0).cfiUpdate.histPtr}\n")

  XSDebug(RegNext(reset.asBool) && !reset.asBool, "Resetting...\n")
  XSDebug(io.ftq_to_bpu.update.valid, p"Update from ftq\n")
  XSDebug(io.ftq_to_bpu.redirect.valid, p"Redirect from ftq\n")

  XSDebug("[BP0] fire=%d pc=%x\n", s0_fire_dup(0), s0_pc_dup(0))
  XSDebug("[BP1] v=%d r=%d cr=%d fire=%d flush=%d pc=%x\n",
    s1_valid_dup(0), s1_ready_dup(0), s1_components_ready_dup(0), s1_fire_dup(0), s1_flush_dup(0), s1_pc)
  XSDebug("[BP2] v=%d r=%d cr=%d fire=%d redirect=%d flush=%d pc=%x\n",
    s2_valid_dup(0), s2_ready_dup(0), s2_components_ready_dup(0), s2_fire_dup(0), s2_redirect_dup(0), s2_flush_dup(0), s2_pc)
  XSDebug("[BP3] v=%d r=%d cr=%d fire=%d redirect=%d flush=%d pc=%x\n",
    s3_valid_dup(0), s3_ready_dup(0), s3_components_ready_dup(0), s3_fire_dup(0), s3_redirect_dup(0), s3_flush_dup(0), s3_pc)
  XSDebug("[FTQ] ready=%d\n", io.bpu_to_ftq.resp.ready)
  XSDebug("resp.s1.target=%x\n", resp.s1.getTarget(0))
  XSDebug("resp.s2.target=%x\n", resp.s2.getTarget(0))
  // XSDebug("s0_ghist: %b\n", s0_ghist.predHist)
  // XSDebug("s1_ghist: %b\n", s1_ghist.predHist)
  // XSDebug("s2_ghist: %b\n", s2_ghist.predHist)
  // XSDebug("s2_predicted_ghist: %b\n", s2_predicted_ghist.predHist)
  XSDebug(p"s0_ghist_ptr: ${s0_ghist_ptr_dup(0)}\n")
  XSDebug(p"s1_ghist_ptr: ${s1_ghist_ptr_dup(0)}\n")
  XSDebug(p"s2_ghist_ptr: ${s2_ghist_ptr_dup(0)}\n")
  XSDebug(p"s3_ghist_ptr: ${s3_ghist_ptr_dup(0)}\n")

  io.ftq_to_bpu.update.bits.display(io.ftq_to_bpu.update.valid)
  io.ftq_to_bpu.redirect.bits.display(io.ftq_to_bpu.redirect.valid)

  XSPerfAccumulate("s2_redirect", s2_redirect_dup(0))
  XSPerfAccumulate("s3_redirect", s3_redirect_dup(0))
  XSPerfAccumulate("s1_not_valid", !s1_valid_dup(0))

  val perfEvents = predictors.asInstanceOf[Composer].getPerfEvents
  generatePerfEvent()
}