/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.frontend

import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import scala.math.min
import utility._
import utils._
import xiangshan._
import xiangshan.backend.decode.ImmUnion

trait HasBPUConst extends HasXSParameter {
  val MaxMetaBaseLength = if (!env.FPGAPlatform) 512 else 256 // TODO: Reduce meta length
  val MaxMetaLength     = if (HasHExtension) MaxMetaBaseLength + 4 else MaxMetaBaseLength
  val MaxBasicBlockSize = 32
  val LHistoryLength    = 32
  // val numBr = 2
  val useBPD    = true
  val useLHist  = true
  val numBrSlot = numBr - 1
  val totalSlot = numBrSlot + 1

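  // Many pipeline signals below are duplicated numDup times (one copy per
  // consumer, presumably to ease fan-out during physical design); the dup_*
  // helpers create and index those copies.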
  val numDup = 4

  // Used to gate PC higher parts
  val pcSegments = Seq(VAddrBits - 24, 12, 12)

  def BP_STAGES = (0 until 3).map(_.U(2.W))
  def BP_S1     = BP_STAGES(0)
  def BP_S2     = BP_STAGES(1)
  def BP_S3     = BP_STAGES(2)

  def dup_seq[T](src:          T, num: Int = numDup) = Seq.tabulate(num)(n => src)
  def dup[T <: Data](src:      T, num: Int = numDup) = VecInit(Seq.tabulate(num)(n => src))
  def dup_wire[T <: Data](src: T, num: Int = numDup) = Wire(Vec(num, src.cloneType))
  def dup_idx     = Seq.tabulate(numDup)(n => n.toString())
  val numBpStages = BP_STAGES.length

  val debug = true
  // TODO: Replace log2Up by log2Ceil
}

trait HasBPUParameter extends HasXSParameter with HasBPUConst {
  val BPUDebug            = true && !env.FPGAPlatform && env.EnablePerfDebug
  val EnableCFICommitLog  = true
  val EnableCFIPredLog    = true
  val EnableBPUTimeRecord = (EnableCFICommitLog || EnableCFIPredLog) && !env.FPGAPlatform
  val EnableCommit        = false
}

class BPUCtrl(implicit p: Parameters) extends XSBundle {
  val ubtb_enable = Bool()
  val btb_enable  = Bool()
  val bim_enable  = Bool()
  val tage_enable = Bool()
  val sc_enable   = Bool()
  val ras_enable  = Bool()
  val loop_enable = Bool()
}

trait BPUUtils extends HasXSParameter {
  // circular shifting
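  // Illustrative example: circularShiftLeft(b0011, len = 4, shamt = 1.U)
  //   higher = b0011 << 1 = b00110, lower = b0011 >> 3 = b0000,
  //   res (truncated to 4 bits) = b0110, i.e. a rotate-left by one bit.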
  def circularShiftLeft(source: UInt, len: Int, shamt: UInt): UInt = {
    val res    = Wire(UInt(len.W))
    val higher = source << shamt
    val lower  = source >> (len.U - shamt)
    res := higher | lower
    res
  }

  def circularShiftRight(source: UInt, len: Int, shamt: UInt): UInt = {
    val res    = Wire(UInt(len.W))
    val higher = source << (len.U - shamt)
    val lower  = source >> shamt
    res := higher | lower
    res
  }

  // To be verified
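  // Saturating counter update; e.g. for len = 2 (counter range 0..3):
  //   satUpdate(3.U, 2, taken = true.B)  => 3 (already saturated high)
  //   satUpdate(0.U, 2, taken = false.B) => 0 (already saturated low)
  //   satUpdate(1.U, 2, taken = true.B)  => 2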
  def satUpdate(old: UInt, len: Int, taken: Bool): UInt = {
    val oldSatTaken    = old === ((1 << len) - 1).U
    val oldSatNotTaken = old === 0.U
    Mux(oldSatTaken && taken, ((1 << len) - 1).U, Mux(oldSatNotTaken && !taken, 0.U, Mux(taken, old + 1.U, old - 1.U)))
  }

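  // Signed variant; e.g. for len = 3 the counter saturates within [-4, 3]:
  //   signedSatUpdate(3.S, 3, taken = true.B)     => 3 (stays at max)
  //   signedSatUpdate((-4).S, 3, taken = false.B) => -4 (stays at min)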
  def signedSatUpdate(old: SInt, len: Int, taken: Bool): SInt = {
    val oldSatTaken    = old === ((1 << (len - 1)) - 1).S
    val oldSatNotTaken = old === (-(1 << (len - 1))).S
    Mux(
      oldSatTaken && taken,
      ((1 << (len - 1)) - 1).S,
      Mux(oldSatNotTaken && !taken, (-(1 << (len - 1))).S, Mux(taken, old + 1.S, old - 1.S))
    )
  }

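  // Rebuild the fall-through address from the upper bits of the start PC,
  // the partial fall-through offset (pft) and a carry into the upper bits.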
  def getFallThroughAddr(start: UInt, carry: Bool, pft: UInt) = {
    val higher = start.head(VAddrBits - log2Ceil(PredictWidth) - instOffsetBits)
    Cat(Mux(carry, higher + 1.U, higher), pft, 0.U(instOffsetBits.W))
  }

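  // XOR-fold a tag into l bits; e.g. a 10-bit tag with l = 4 is split into
  // chunks tag(3,0), tag(7,4) and tag(9,8), which are XORed together.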
  def foldTag(tag: UInt, l: Int): UInt = {
    val nChunks = (tag.getWidth + l - 1) / l
    val chunks  = (0 until nChunks).map(i => tag(min((i + 1) * l, tag.getWidth) - 1, i * l))
    ParallelXOR(chunks)
  }
}

class BasePredictorInput(implicit p: Parameters) extends XSBundle with HasBPUConst {
  def nInputs = 1

  val s0_pc = Vec(numDup, UInt(VAddrBits.W))

  val folded_hist    = Vec(numDup, new AllFoldedHistories(foldedGHistInfos))
  val s1_folded_hist = Vec(numDup, new AllFoldedHistories(foldedGHistInfos))
  val ghist          = UInt(HistoryLength.W)

  val resp_in = Vec(nInputs, new BranchPredictionResp)

  // val final_preds = Vec(numBpStages, new)
  // val toFtq_fire = Bool()

  // val s0_all_ready = Bool()
}

class BasePredictorOutput(implicit p: Parameters) extends BranchPredictionResp {}

class BasePredictorIO(implicit p: Parameters) extends XSBundle with HasBPUConst {
  val reset_vector = Input(UInt(PAddrBits.W))
  val in           = Flipped(DecoupledIO(new BasePredictorInput)) // TODO: Remove DecoupledIO
  // val out = DecoupledIO(new BasePredictorOutput)
  val out = Output(new BasePredictorOutput)
  // val flush_out = Valid(UInt(VAddrBits.W))

  val fauftb_entry_in      = Input(new FTBEntry)
  val fauftb_entry_hit_in  = Input(Bool())
  val fauftb_entry_out     = Output(new FTBEntry)
  val fauftb_entry_hit_out = Output(Bool())

  val ctrl = Input(new BPUCtrl)

  val s0_fire = Input(Vec(numDup, Bool()))
  val s1_fire = Input(Vec(numDup, Bool()))
  val s2_fire = Input(Vec(numDup, Bool()))
  val s3_fire = Input(Vec(numDup, Bool()))

  val s2_redirect = Input(Vec(numDup, Bool()))
  val s3_redirect = Input(Vec(numDup, Bool()))

  val s1_ready = Output(Bool())
  val s2_ready = Output(Bool())
  val s3_ready = Output(Bool())

  val update          = Flipped(Valid(new BranchPredictionUpdate))
  val redirect        = Flipped(Valid(new BranchPredictionRedirect))
  val redirectFromIFU = Input(Bool())
}

abstract class BasePredictor(implicit p: Parameters) extends XSModule
    with HasBPUConst with BPUUtils with HasPerfEvents {
  val meta_size      = 0
  val spec_meta_size = 0
  val is_fast_pred   = false
  val io             = IO(new BasePredictorIO())

  io.out := io.in.bits.resp_in(0)

  io.fauftb_entry_out     := io.fauftb_entry_in
  io.fauftb_entry_hit_out := io.fauftb_entry_hit_in

  io.out.last_stage_meta := 0.U

  io.in.ready := !io.redirect.valid

  io.s1_ready := true.B
  io.s2_ready := true.B
  io.s3_ready := true.B

  val s0_pc_dup = WireInit(io.in.bits.s0_pc) // fetchIdx(io.f0_pc)
  val s1_pc_dup = s0_pc_dup.zip(io.s0_fire).map { case (s0_pc, s0_fire) => RegEnable(s0_pc, s0_fire) }
  val s2_pc_dup = s1_pc_dup.zip(io.s1_fire).map { case (s1_pc, s1_fire) =>
    SegmentedAddrNext(s1_pc, pcSegments, s1_fire, Some("s2_pc"))
  }
  val s3_pc_dup = s2_pc_dup.zip(io.s2_fire).map { case (s2_pc, s2_fire) =>
    SegmentedAddrNext(s2_pc, s2_fire, Some("s3_pc"))
  }

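  // Right after reset deasserts (detected as a delayed falling edge of
  // reset), load the reset vector into the s1 PC registers, which otherwise
  // have no reset value.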
  when(RegNext(RegNext(reset.asBool) && !reset.asBool)) {
    s1_pc_dup.map { case s1_pc => s1_pc := io.reset_vector }
  }

  io.out.s1.pc := s1_pc_dup
  io.out.s2.pc := s2_pc_dup.map(_.getAddr())
  io.out.s3.pc := s3_pc_dup.map(_.getAddr())

  val perfEvents: Seq[(String, UInt)] = Seq()

  def getFoldedHistoryInfo: Option[Set[FoldedHistoryInfo]] = None
}

class FakePredictor(implicit p: Parameters) extends BasePredictor {
  io.in.ready            := true.B
  io.out.last_stage_meta := 0.U
  io.out                 := io.in.bits.resp_in(0)
}

class BpuToFtqIO(implicit p: Parameters) extends XSBundle {
  val resp = DecoupledIO(new BpuToFtqBundle())
}

class PredictorIO(implicit p: Parameters) extends XSBundle {
  val bpu_to_ftq   = new BpuToFtqIO()
  val ftq_to_bpu   = Flipped(new FtqToBpuIO)
  val ctrl         = Input(new BPUCtrl)
  val reset_vector = Input(UInt(PAddrBits.W))
}

class Predictor(implicit p: Parameters) extends XSModule with HasBPUConst with HasPerfEvents
    with HasCircularQueuePtrHelper {
  val io = IO(new PredictorIO)

  val ctrl       = DelayN(io.ctrl, 1)
  val predictors = Module(if (useBPD) new Composer else new FakePredictor)

  def numOfStage = 3
  require(numOfStage > 1, "BPU numOfStage must be greater than 1")
  val topdown_stages = RegInit(VecInit(Seq.fill(numOfStage)(0.U.asTypeOf(new FrontendTopDownBundle))))

  // the following can only happen on s1
  val controlRedirectBubble = Wire(Bool())
  val ControlBTBMissBubble  = Wire(Bool())
  val TAGEMissBubble        = Wire(Bool())
  val SCMissBubble          = Wire(Bool())
  val ITTAGEMissBubble      = Wire(Bool())
  val RASMissBubble         = Wire(Bool())

  val memVioRedirectBubble = Wire(Bool())
  val otherRedirectBubble  = Wire(Bool())
  val btbMissBubble        = Wire(Bool())
  otherRedirectBubble  := false.B
  memVioRedirectBubble := false.B

  // override can happen between s1-s2 and s2-s3
  val overrideBubble = Wire(Vec(numOfStage - 1, Bool()))
  def overrideStage  = 1
  // ftq update block can happen on s1, s2 and s3
  val ftqUpdateBubble = Wire(Vec(numOfStage, Bool()))
  def ftqUpdateStage  = 0
  // ftq full stall only happens on s3 (last stage)
  val ftqFullStall = Wire(Bool())

  // by default, no bubble event
  topdown_stages(0) := 0.U.asTypeOf(new FrontendTopDownBundle)
  // event movement driven by clock only
  for (i <- 0 until numOfStage - 1) {
    topdown_stages(i + 1) := topdown_stages(i)
  }

  // ctrl signal
  predictors.io.ctrl         := ctrl
  predictors.io.reset_vector := io.reset_vector

  val s0_stall_dup = dup_wire(Bool()) // s0 stalled for some reason, usually because the FTQ is full
  val s0_fire_dup, s1_fire_dup, s2_fire_dup, s3_fire_dup                        = dup_wire(Bool())
  val s1_valid_dup, s2_valid_dup, s3_valid_dup                                  = dup_seq(RegInit(false.B))
  val s1_ready_dup, s2_ready_dup, s3_ready_dup                                  = dup_wire(Bool())
  val s1_components_ready_dup, s2_components_ready_dup, s3_components_ready_dup = dup_wire(Bool())

  val s0_pc_dup     = dup(WireInit(0.U.asTypeOf(UInt(VAddrBits.W))))
  val s0_pc_reg_dup = s0_pc_dup.zip(s0_stall_dup).map { case (s0_pc, s0_stall) => RegEnable(s0_pc, !s0_stall) }
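  // After reset deasserts, seed the stall-PC registers with the reset vector.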
  when(RegNext(RegNext(reset.asBool) && !reset.asBool)) {
    s0_pc_reg_dup.map { case s0_pc => s0_pc := io.reset_vector }
  }
  val s1_pc = RegEnable(s0_pc_dup(0), s0_fire_dup(0))
  val s2_pc = RegEnable(s1_pc, s1_fire_dup(0))
  val s3_pc = RegEnable(s2_pc, s2_fire_dup(0))

  val s0_folded_gh_dup = dup_wire(new AllFoldedHistories(foldedGHistInfos))
  val s0_folded_gh_reg_dup = s0_folded_gh_dup.zip(s0_stall_dup).map {
    case (x, s0_stall) => RegEnable(x, 0.U.asTypeOf(s0_folded_gh_dup(0)), !s0_stall)
  }
  val s1_folded_gh_dup = RegEnable(s0_folded_gh_dup, 0.U.asTypeOf(s0_folded_gh_dup), s0_fire_dup(1))
  val s2_folded_gh_dup = RegEnable(s1_folded_gh_dup, 0.U.asTypeOf(s0_folded_gh_dup), s1_fire_dup(1))
  val s3_folded_gh_dup = RegEnable(s2_folded_gh_dup, 0.U.asTypeOf(s0_folded_gh_dup), s2_fire_dup(1))

  val s0_last_br_num_oh_dup = dup_wire(UInt((numBr + 1).W))
  val s0_last_br_num_oh_reg_dup = s0_last_br_num_oh_dup.zip(s0_stall_dup).map {
    case (x, s0_stall) => RegEnable(x, 0.U, !s0_stall)
  }
  val s1_last_br_num_oh_dup = RegEnable(s0_last_br_num_oh_dup, 0.U.asTypeOf(s0_last_br_num_oh_dup), s0_fire_dup(1))
  val s2_last_br_num_oh_dup = RegEnable(s1_last_br_num_oh_dup, 0.U.asTypeOf(s0_last_br_num_oh_dup), s1_fire_dup(1))
  val s3_last_br_num_oh_dup = RegEnable(s2_last_br_num_oh_dup, 0.U.asTypeOf(s0_last_br_num_oh_dup), s2_fire_dup(1))

  val s0_ahead_fh_oldest_bits_dup = dup_wire(new AllAheadFoldedHistoryOldestBits(foldedGHistInfos))
  val s0_ahead_fh_oldest_bits_reg_dup = s0_ahead_fh_oldest_bits_dup.zip(s0_stall_dup).map {
    case (x, s0_stall) => RegEnable(x, 0.U.asTypeOf(s0_ahead_fh_oldest_bits_dup(0)), !s0_stall)
  }
  val s1_ahead_fh_oldest_bits_dup =
    RegEnable(s0_ahead_fh_oldest_bits_dup, 0.U.asTypeOf(s0_ahead_fh_oldest_bits_dup), s0_fire_dup(1))
  val s2_ahead_fh_oldest_bits_dup =
    RegEnable(s1_ahead_fh_oldest_bits_dup, 0.U.asTypeOf(s0_ahead_fh_oldest_bits_dup), s1_fire_dup(1))
  val s3_ahead_fh_oldest_bits_dup =
    RegEnable(s2_ahead_fh_oldest_bits_dup, 0.U.asTypeOf(s0_ahead_fh_oldest_bits_dup), s2_fire_dup(1))

  val npcGen_dup         = Seq.tabulate(numDup)(n => new PhyPriorityMuxGenerator[UInt])
  val foldedGhGen_dup    = Seq.tabulate(numDup)(n => new PhyPriorityMuxGenerator[AllFoldedHistories])
  val ghistPtrGen_dup    = Seq.tabulate(numDup)(n => new PhyPriorityMuxGenerator[CGHPtr])
  val lastBrNumOHGen_dup = Seq.tabulate(numDup)(n => new PhyPriorityMuxGenerator[UInt])
  val aheadFhObGen_dup   = Seq.tabulate(numDup)(n => new PhyPriorityMuxGenerator[AllAheadFoldedHistoryOldestBits])

  val ghvBitWriteGens = Seq.tabulate(HistoryLength)(n => new PhyPriorityMuxGenerator[Bool])
  // val ghistGen = new PhyPriorityMuxGenerator[UInt]

  val ghv      = RegInit(0.U.asTypeOf(Vec(HistoryLength, Bool())))
  val ghv_wire = WireInit(ghv)

  val s0_ghist = WireInit(0.U.asTypeOf(UInt(HistoryLength.W)))

  println(f"history buffer length ${HistoryLength}")
  val ghv_write_datas = Wire(Vec(HistoryLength, Bool()))
  val ghv_wens        = Wire(Vec(HistoryLength, Bool()))

  val s0_ghist_ptr_dup = dup_wire(new CGHPtr)
  val s0_ghist_ptr_reg_dup = s0_ghist_ptr_dup.zip(s0_stall_dup).map {
    case (x, s0_stall) => RegEnable(x, 0.U.asTypeOf(new CGHPtr), !s0_stall)
  }
  val s1_ghist_ptr_dup = RegEnable(s0_ghist_ptr_dup, 0.U.asTypeOf(s0_ghist_ptr_dup), s0_fire_dup(1))
  val s2_ghist_ptr_dup = RegEnable(s1_ghist_ptr_dup, 0.U.asTypeOf(s0_ghist_ptr_dup), s1_fire_dup(1))
  val s3_ghist_ptr_dup = RegEnable(s2_ghist_ptr_dup, 0.U.asTypeOf(s0_ghist_ptr_dup), s2_fire_dup(1))

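  // ghv is a circular buffer of history bits indexed by CGHPtr; the doubled
  // Cat rotates it so that the bit just above ptr becomes bit 0 of the
  // returned HistoryLength-bit history (youngest bit first).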
  def getHist(ptr: CGHPtr): UInt = (Cat(ghv_wire.asUInt, ghv_wire.asUInt) >> (ptr.value + 1.U))(HistoryLength - 1, 0)
  s0_ghist := getHist(s0_ghist_ptr_dup(0))

  val resp = predictors.io.out

  val toFtq_fire = io.bpu_to_ftq.resp.valid && io.bpu_to_ftq.resp.ready

  val s1_flush_dup, s2_flush_dup, s3_flush_dup = dup_wire(Bool())
  val s2_redirect_dup, s3_redirect_dup         = dup_wire(Bool())

  // predictors.io := DontCare
  predictors.io.in.valid               := s0_fire_dup(0)
  predictors.io.in.bits.s0_pc          := s0_pc_dup
  predictors.io.in.bits.ghist          := s0_ghist
  predictors.io.in.bits.folded_hist    := s0_folded_gh_dup
  predictors.io.in.bits.s1_folded_hist := s1_folded_gh_dup
  predictors.io.in.bits.resp_in(0)     := 0.U.asTypeOf(new BranchPredictionResp)
  predictors.io.fauftb_entry_in        := 0.U.asTypeOf(new FTBEntry)
  predictors.io.fauftb_entry_hit_in    := false.B
  predictors.io.redirectFromIFU        := RegNext(io.ftq_to_bpu.redirctFromIFU, init = false.B)
  // predictors.io.in.bits.resp_in(0).s1.pc := s0_pc
  // predictors.io.in.bits.toFtq_fire := toFtq_fire

  // predictors.io.out.ready := io.bpu_to_ftq.resp.ready

  val redirect_req    = io.ftq_to_bpu.redirect
  val do_redirect_dup = dup_seq(RegNextWithEnable(redirect_req))

  // Pipeline logic
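  // s1 produces the first prediction; s2 and s3 may override it with a
  // redirect, and a redirect from stage N flushes all younger stages.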
  s2_redirect_dup.map(_ := false.B)
  s3_redirect_dup.map(_ := false.B)

  s3_flush_dup.map(_ := redirect_req.valid) // flush when redirect comes
  for (((s2_flush, s3_flush), s3_redirect) <- s2_flush_dup zip s3_flush_dup zip s3_redirect_dup)
    s2_flush := s3_flush || s3_redirect
  for (((s1_flush, s2_flush), s2_redirect) <- s1_flush_dup zip s2_flush_dup zip s2_redirect_dup)
    s1_flush := s2_flush || s2_redirect

  s1_components_ready_dup.map(_ := predictors.io.s1_ready)
  for (((s1_ready, s1_fire), s1_valid) <- s1_ready_dup zip s1_fire_dup zip s1_valid_dup)
    s1_ready := s1_fire || !s1_valid
  for (((s0_fire, s1_components_ready), s1_ready) <- s0_fire_dup zip s1_components_ready_dup zip s1_ready_dup)
    s0_fire             := s1_components_ready && s1_ready
  predictors.io.s0_fire := s0_fire_dup

  s2_components_ready_dup.map(_ := predictors.io.s2_ready)
  for (((s2_ready, s2_fire), s2_valid) <- s2_ready_dup zip s2_fire_dup zip s2_valid_dup)
    s2_ready := s2_fire || !s2_valid
  for (
    (((s1_fire, s2_components_ready), s2_ready), s1_valid) <-
      s1_fire_dup zip s2_components_ready_dup zip s2_ready_dup zip s1_valid_dup
  )
    s1_fire := s1_valid && s2_components_ready && s2_ready && io.bpu_to_ftq.resp.ready

  s3_components_ready_dup.map(_ := predictors.io.s3_ready)
  for (((s3_ready, s3_fire), s3_valid) <- s3_ready_dup zip s3_fire_dup zip s3_valid_dup)
    s3_ready := s3_fire || !s3_valid
  for (
    (((s2_fire, s3_components_ready), s3_ready), s2_valid) <-
      s2_fire_dup zip s3_components_ready_dup zip s3_ready_dup zip s2_valid_dup
  )
    s2_fire := s2_valid && s3_components_ready && s3_ready

  for ((((s0_fire, s1_flush), s1_fire), s1_valid) <- s0_fire_dup zip s1_flush_dup zip s1_fire_dup zip s1_valid_dup) {
    when(redirect_req.valid)(s1_valid := false.B)
      .elsewhen(s0_fire)(s1_valid := true.B)
      .elsewhen(s1_flush)(s1_valid := false.B)
      .elsewhen(s1_fire)(s1_valid := false.B)
  }
  predictors.io.s1_fire := s1_fire_dup

  s2_fire_dup := s2_valid_dup

  for (
    ((((s1_fire, s2_flush), s2_fire), s2_valid), s1_flush) <-
      s1_fire_dup zip s2_flush_dup zip s2_fire_dup zip s2_valid_dup zip s1_flush_dup
  ) {

    when(s2_flush)(s2_valid := false.B)
      .elsewhen(s1_fire)(s2_valid := !s1_flush)
      .elsewhen(s2_fire)(s2_valid := false.B)
  }

  predictors.io.s2_fire     := s2_fire_dup
  predictors.io.s2_redirect := s2_redirect_dup

  s3_fire_dup := s3_valid_dup

  for (
    ((((s2_fire, s3_flush), s3_fire), s3_valid), s2_flush) <-
      s2_fire_dup zip s3_flush_dup zip s3_fire_dup zip s3_valid_dup zip s2_flush_dup
  ) {

    when(s3_flush)(s3_valid := false.B)
      .elsewhen(s2_fire)(s3_valid := !s2_flush)
      .elsewhen(s3_fire)(s3_valid := false.B)
  }

  predictors.io.s3_fire     := s3_fire_dup
  predictors.io.s3_redirect := s3_redirect_dup

  io.bpu_to_ftq.resp.valid :=
    s1_valid_dup(2) && s2_components_ready_dup(2) && s2_ready_dup(2) ||
      s2_fire_dup(2) && s2_redirect_dup(2) ||
      s3_fire_dup(2) && s3_redirect_dup(2)
  io.bpu_to_ftq.resp.bits                              := predictors.io.out
  io.bpu_to_ftq.resp.bits.last_stage_spec_info.histPtr := s3_ghist_ptr_dup(2)

  val full_pred_diff        = WireInit(false.B)
  val full_pred_diff_stage  = WireInit(0.U)
  val full_pred_diff_offset = WireInit(0.U)
  for (i <- 0 until numDup - 1) {
    when(io.bpu_to_ftq.resp.valid &&
      ((io.bpu_to_ftq.resp.bits.s1.full_pred(i).asTypeOf(UInt()) =/= io.bpu_to_ftq.resp.bits.s1.full_pred(
        i + 1
      ).asTypeOf(UInt()) && io.bpu_to_ftq.resp.bits.s1.full_pred(i).hit) ||
        (io.bpu_to_ftq.resp.bits.s2.full_pred(i).asTypeOf(UInt()) =/= io.bpu_to_ftq.resp.bits.s2.full_pred(
          i + 1
        ).asTypeOf(UInt()) && io.bpu_to_ftq.resp.bits.s2.full_pred(i).hit) ||
        (io.bpu_to_ftq.resp.bits.s3.full_pred(i).asTypeOf(UInt()) =/= io.bpu_to_ftq.resp.bits.s3.full_pred(
          i + 1
        ).asTypeOf(UInt()) && io.bpu_to_ftq.resp.bits.s3.full_pred(i).hit))) {
      full_pred_diff        := true.B
      full_pred_diff_offset := i.U
      when(io.bpu_to_ftq.resp.bits.s1.full_pred(i).asTypeOf(UInt()) =/= io.bpu_to_ftq.resp.bits.s1.full_pred(
        i + 1
      ).asTypeOf(UInt())) {
        full_pred_diff_stage := 1.U
      }.elsewhen(io.bpu_to_ftq.resp.bits.s2.full_pred(i).asTypeOf(UInt()) =/= io.bpu_to_ftq.resp.bits.s2.full_pred(
        i + 1
      ).asTypeOf(UInt())) {
        full_pred_diff_stage := 2.U
      }.otherwise {
        full_pred_diff_stage := 3.U
      }
    }
  }
  XSError(full_pred_diff, "Full prediction difference detected!")

  // s0_stall should be exclusive with any other PC source
  s0_stall_dup.zip(s1_valid_dup).zip(s2_redirect_dup).zip(s3_redirect_dup).zip(do_redirect_dup).foreach {
    case ((((s0_stall, s1_valid), s2_redirect), s3_redirect), do_redirect) => {
      s0_stall := !(s1_valid || s2_redirect || s3_redirect || do_redirect.valid)
    }
  }
  // Power-on reset
  val powerOnResetState = RegInit(true.B)
  when(s0_fire_dup(0)) {
    // Once the BPU pipeline fires for the first time, power-on reset is considered done
    powerOnResetState := false.B
  }
  XSError(
    !powerOnResetState && s0_stall_dup(0) && s0_pc_dup(0) =/= s0_pc_reg_dup(0),
    "s0_stall but s0_pc is different from s0_pc_reg"
  )

  npcGen_dup.zip(s0_pc_reg_dup).map { case (gen, reg) =>
    gen.register(true.B, reg, Some("stallPC"), 0)
  }
  foldedGhGen_dup.zip(s0_folded_gh_reg_dup).map { case (gen, reg) =>
    gen.register(true.B, reg, Some("stallFGH"), 0)
  }
  ghistPtrGen_dup.zip(s0_ghist_ptr_reg_dup).map { case (gen, reg) =>
    gen.register(true.B, reg, Some("stallGHPtr"), 0)
  }
  lastBrNumOHGen_dup.zip(s0_last_br_num_oh_reg_dup).map { case (gen, reg) =>
    gen.register(true.B, reg, Some("stallBrNumOH"), 0)
  }
  aheadFhObGen_dup.zip(s0_ahead_fh_oldest_bits_reg_dup).map { case (gen, reg) =>
    gen.register(true.B, reg, Some("stallAFHOB"), 0)
  }

  // assign pred cycle for profiling
  io.bpu_to_ftq.resp.bits.s1.full_pred.map(_.predCycle.map(_ := GTimer()))
  io.bpu_to_ftq.resp.bits.s2.full_pred.map(_.predCycle.map(_ := GTimer()))
  io.bpu_to_ftq.resp.bits.s3.full_pred.map(_.predCycle.map(_ := GTimer()))

  // History management
  // s1
  val s1_possible_predicted_ghist_ptrs_dup = s1_ghist_ptr_dup.map(ptr => (0 to numBr).map(ptr - _.U))
  val s1_predicted_ghist_ptr_dup = s1_possible_predicted_ghist_ptrs_dup.zip(resp.s1.lastBrPosOH).map { case (ptr, oh) =>
    Mux1H(oh, ptr)
  }
  val s1_possible_predicted_fhs_dup =
    for (
      ((((fgh, afh), br_num_oh), t), br_pos_oh) <-
        s1_folded_gh_dup zip s1_ahead_fh_oldest_bits_dup zip s1_last_br_num_oh_dup zip resp.s1.brTaken zip resp.s1.lastBrPosOH
    )
      yield (0 to numBr).map(i =>
        fgh.update(afh, br_num_oh, i, t & br_pos_oh(i))
      )
  val s1_predicted_fh_dup = resp.s1.lastBrPosOH.zip(s1_possible_predicted_fhs_dup).map { case (oh, fh) =>
    Mux1H(oh, fh)
  }

  val s1_ahead_fh_ob_src_dup = dup_wire(new AllAheadFoldedHistoryOldestBits(foldedGHistInfos))
  s1_ahead_fh_ob_src_dup.zip(s1_ghist_ptr_dup).map { case (src, ptr) => src.read(ghv, ptr) }

  if (EnableGHistDiff) {
    val s1_predicted_ghist = WireInit(getHist(s1_predicted_ghist_ptr_dup(0)).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when(resp.s1.shouldShiftVec(0)(i)) {
        s1_predicted_ghist(i) := resp.s1.brTaken(0) && (i == 0).B
      }
    }
    when(s1_valid_dup(0)) {
      s0_ghist := s1_predicted_ghist.asUInt
    }
  }

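  // Per-bit write enables for the global history vector: bit n is written by
  // s1 when n == (s1_ghist_ptr - b) for some slot b that shifts this cycle.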
  val s1_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b =>
      s1_ghist_ptr_dup(0).value === (CGHPtr(false.B, n.U) + b.U).value && resp.s1.shouldShiftVec(0)(b) && s1_valid_dup(
        0
      )
    )
  )
  val s1_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b =>
        (
          s1_ghist_ptr_dup(0).value === (CGHPtr(false.B, n.U) + b.U).value && resp.s1.shouldShiftVec(0)(b),
          resp.s1.brTaken(0) && resp.s1.lastBrPosOH(0)(b + 1)
        )
      )
    )
  )

  for (((npcGen, s1_valid), s1_target) <- npcGen_dup zip s1_valid_dup zip resp.s1.getTarget)
    npcGen.register(s1_valid, s1_target, Some("s1_target"), 4)
  for (((foldedGhGen, s1_valid), s1_predicted_fh) <- foldedGhGen_dup zip s1_valid_dup zip s1_predicted_fh_dup)
    foldedGhGen.register(s1_valid, s1_predicted_fh, Some("s1_FGH"), 4)
  for (
    ((ghistPtrGen, s1_valid), s1_predicted_ghist_ptr) <- ghistPtrGen_dup zip s1_valid_dup zip s1_predicted_ghist_ptr_dup
  )
    ghistPtrGen.register(s1_valid, s1_predicted_ghist_ptr, Some("s1_GHPtr"), 4)
  for (
    ((lastBrNumOHGen, s1_valid), s1_brPosOH) <-
      lastBrNumOHGen_dup zip s1_valid_dup zip resp.s1.lastBrPosOH.map(_.asUInt)
  )
    lastBrNumOHGen.register(s1_valid, s1_brPosOH, Some("s1_BrNumOH"), 4)
  for (((aheadFhObGen, s1_valid), s1_ahead_fh_ob_src) <- aheadFhObGen_dup zip s1_valid_dup zip s1_ahead_fh_ob_src_dup)
    aheadFhObGen.register(s1_valid, s1_ahead_fh_ob_src, Some("s1_AFHOB"), 4)
  ghvBitWriteGens.zip(s1_ghv_wens).zipWithIndex.map { case ((b, w), i) =>
    b.register(w.reduce(_ || _), s1_ghv_wdatas(i), Some(s"s1_new_bit_$i"), 4)
  }

  class PreviousPredInfo extends Bundle {
    val hit         = Vec(numDup, Bool())
    val target      = Vec(numDup, UInt(VAddrBits.W))
    val lastBrPosOH = Vec(numDup, Vec(numBr + 1, Bool()))
    val taken       = Vec(numDup, Bool())
    val takenMask   = Vec(numDup, Vec(numBr, Bool()))
    val cfiIndex    = Vec(numDup, UInt(log2Ceil(PredictWidth).W))
  }

  def preds_needs_redirect_vec_dup(x: PreviousPredInfo, y: BranchPredictionBundle) = {
    // Timing optimization:
    // we first compare all targets with the previous stage's target,
    // then select the relevant difference by taken & hit.
    // Targets are usually generated earlier than taken, so comparing before selecting helps timing.
606    // Usually target is generated quicker than taken, so do target compare before select can help timing
607    val targetDiffVec: IndexedSeq[Vec[Bool]] =
608      x.target.zip(y.getAllTargets).map {
609        case (xTarget, yAllTarget) => VecInit(yAllTarget.map(_ =/= xTarget))
610      } // [numDup][all Target comparison]
611    val targetDiff: IndexedSeq[Bool] =
612      targetDiffVec.zip(x.hit).zip(x.takenMask).map {
613        case ((diff, hit), takenMask) => selectByTaken(takenMask, hit, diff)
614      } // [numDup]
615
616    val lastBrPosOHDiff: IndexedSeq[Bool] = x.lastBrPosOH.zip(y.lastBrPosOH).map { case (oh1, oh2) =>
617      oh1.asUInt =/= oh2.asUInt
618    }
619    val takenDiff: IndexedSeq[Bool] = x.taken.zip(y.taken).map { case (t1, t2) => t1 =/= t2 }
620    val takenOffsetDiff: IndexedSeq[Bool] = x.cfiIndex.zip(y.cfiIndex).zip(x.taken).zip(y.taken).map {
621      case (((i1, i2), xt), yt) => xt && yt && i1 =/= i2.bits
622    }
623    VecInit(
624      for (
625        (((tgtd, lbpohd), tkd), tod) <-
626          targetDiff zip lastBrPosOHDiff zip takenDiff zip takenOffsetDiff
627      )
628        yield VecInit(tgtd, lbpohd, tkd, tod)
629      // x.shouldShiftVec.asUInt =/= y.shouldShiftVec.asUInt,
630      // x.brTaken =/= y.brTaken
631    )
632  }

  // s2
  val s2_possible_predicted_ghist_ptrs_dup = s2_ghist_ptr_dup.map(ptr => (0 to numBr).map(ptr - _.U))
  val s2_predicted_ghist_ptr_dup = s2_possible_predicted_ghist_ptrs_dup.zip(resp.s2.lastBrPosOH).map { case (ptr, oh) =>
    Mux1H(oh, ptr)
  }

  val s2_possible_predicted_fhs_dup =
    for (
      (((fgh, afh), br_num_oh), full_pred) <-
        s2_folded_gh_dup zip s2_ahead_fh_oldest_bits_dup zip s2_last_br_num_oh_dup zip resp.s2.full_pred
    )
      yield (0 to numBr).map(i =>
        fgh.update(afh, br_num_oh, i, if (i > 0) full_pred.br_taken_mask(i - 1) else false.B)
      )
  val s2_predicted_fh_dup = resp.s2.lastBrPosOH.zip(s2_possible_predicted_fhs_dup).map { case (oh, fh) =>
    Mux1H(oh, fh)
  }

  val s2_ahead_fh_ob_src_dup = dup_wire(new AllAheadFoldedHistoryOldestBits(foldedGHistInfos))
  s2_ahead_fh_ob_src_dup.zip(s2_ghist_ptr_dup).map { case (src, ptr) => src.read(ghv, ptr) }

  if (EnableGHistDiff) {
    val s2_predicted_ghist = WireInit(getHist(s2_predicted_ghist_ptr_dup(0)).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when(resp.s2.shouldShiftVec(0)(i)) {
        s2_predicted_ghist(i) := resp.s2.brTaken(0) && (i == 0).B
      }
    }
    when(s2_redirect_dup(0)) {
      s0_ghist := s2_predicted_ghist.asUInt
    }
  }

  val s2_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b =>
      s2_ghist_ptr_dup(0).value === (CGHPtr(false.B, n.U) + b.U).value && resp.s2.shouldShiftVec(0)(
        b
      ) && s2_redirect_dup(0)
    )
  )
  val s2_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b =>
        (
          s2_ghist_ptr_dup(0).value === (CGHPtr(false.B, n.U) + b.U).value && resp.s2.shouldShiftVec(0)(b),
          resp.s2.full_pred(0).real_br_taken_mask()(b)
        )
      )
    )
  )

  val s1_pred_info = Wire(new PreviousPredInfo)
  s1_pred_info.hit         := resp.s1.full_pred.map(_.hit)
  s1_pred_info.target      := resp.s1.getTarget
  s1_pred_info.lastBrPosOH := resp.s1.lastBrPosOH
  s1_pred_info.taken       := resp.s1.taken
  s1_pred_info.takenMask   := resp.s1.full_pred.map(_.taken_mask_on_slot)
  s1_pred_info.cfiIndex    := resp.s1.cfiIndex.map { case x => x.bits }

  val previous_s1_pred_info = RegEnable(s1_pred_info, 0.U.asTypeOf(new PreviousPredInfo), s1_fire_dup(0))

  val s2_redirect_s1_last_pred_vec_dup = preds_needs_redirect_vec_dup(previous_s1_pred_info, resp.s2)

  for (
    ((s2_redirect, s2_fire), s2_redirect_s1_last_pred_vec) <-
      s2_redirect_dup zip s2_fire_dup zip s2_redirect_s1_last_pred_vec_dup
  )
    s2_redirect := s2_fire && s2_redirect_s1_last_pred_vec.reduce(_ || _)

  for (((npcGen, s2_redirect), s2_target) <- npcGen_dup zip s2_redirect_dup zip resp.s2.getTarget)
    npcGen.register(s2_redirect, s2_target, Some("s2_target"), 5)
  for (((foldedGhGen, s2_redirect), s2_predicted_fh) <- foldedGhGen_dup zip s2_redirect_dup zip s2_predicted_fh_dup)
    foldedGhGen.register(s2_redirect, s2_predicted_fh, Some("s2_FGH"), 5)
  for (
    ((ghistPtrGen, s2_redirect), s2_predicted_ghist_ptr) <-
      ghistPtrGen_dup zip s2_redirect_dup zip s2_predicted_ghist_ptr_dup
  )
    ghistPtrGen.register(s2_redirect, s2_predicted_ghist_ptr, Some("s2_GHPtr"), 5)
  for (
    ((lastBrNumOHGen, s2_redirect), s2_brPosOH) <-
      lastBrNumOHGen_dup zip s2_redirect_dup zip resp.s2.lastBrPosOH.map(_.asUInt)
  )
    lastBrNumOHGen.register(s2_redirect, s2_brPosOH, Some("s2_BrNumOH"), 5)
  for (
    ((aheadFhObGen, s2_redirect), s2_ahead_fh_ob_src) <- aheadFhObGen_dup zip s2_redirect_dup zip s2_ahead_fh_ob_src_dup
  )
    aheadFhObGen.register(s2_redirect, s2_ahead_fh_ob_src, Some("s2_AFHOB"), 5)
  ghvBitWriteGens.zip(s2_ghv_wens).zipWithIndex.map { case ((b, w), i) =>
    b.register(w.reduce(_ || _), s2_ghv_wdatas(i), Some(s"s2_new_bit_$i"), 5)
  }

  XSPerfAccumulate("s2_redirect_because_target_diff", s2_fire_dup(0) && s2_redirect_s1_last_pred_vec_dup(0)(0))
  XSPerfAccumulate("s2_redirect_because_branch_num_diff", s2_fire_dup(0) && s2_redirect_s1_last_pred_vec_dup(0)(1))
  XSPerfAccumulate("s2_redirect_because_direction_diff", s2_fire_dup(0) && s2_redirect_s1_last_pred_vec_dup(0)(2))
  XSPerfAccumulate("s2_redirect_because_cfi_idx_diff", s2_fire_dup(0) && s2_redirect_s1_last_pred_vec_dup(0)(3))
  // XSPerfAccumulate("s2_redirect_because_shouldShiftVec_diff", s2_fire && s2_redirect_s1_last_pred_vec(4))
  // XSPerfAccumulate("s2_redirect_because_brTaken_diff", s2_fire && s2_redirect_s1_last_pred_vec(5))
  XSPerfAccumulate("s2_redirect_because_fallThroughError", s2_fire_dup(0) && resp.s2.fallThruError(0))

  XSPerfAccumulate("s2_redirect_when_taken", s2_redirect_dup(0) && resp.s2.taken(0) && resp.s2.full_pred(0).hit)
  XSPerfAccumulate("s2_redirect_when_not_taken", s2_redirect_dup(0) && !resp.s2.taken(0) && resp.s2.full_pred(0).hit)
  XSPerfAccumulate("s2_redirect_when_not_hit", s2_redirect_dup(0) && !resp.s2.full_pred(0).hit)

  // s3
  val s3_possible_predicted_ghist_ptrs_dup = s3_ghist_ptr_dup.map(ptr => (0 to numBr).map(ptr - _.U))
  val s3_predicted_ghist_ptr_dup = s3_possible_predicted_ghist_ptrs_dup.zip(resp.s3.lastBrPosOH).map { case (ptr, oh) =>
    Mux1H(oh, ptr)
  }

  val s3_possible_predicted_fhs_dup =
    for (
      (((fgh, afh), br_num_oh), full_pred) <-
        s3_folded_gh_dup zip s3_ahead_fh_oldest_bits_dup zip s3_last_br_num_oh_dup zip resp.s3.full_pred
    )
      yield (0 to numBr).map(i =>
        fgh.update(afh, br_num_oh, i, if (i > 0) full_pred.br_taken_mask(i - 1) else false.B)
      )
  val s3_predicted_fh_dup = resp.s3.lastBrPosOH.zip(s3_possible_predicted_fhs_dup).map { case (oh, fh) =>
    Mux1H(oh, fh)
  }

  val s3_ahead_fh_ob_src_dup = dup_wire(new AllAheadFoldedHistoryOldestBits(foldedGHistInfos))
  s3_ahead_fh_ob_src_dup.zip(s3_ghist_ptr_dup).map { case (src, ptr) => src.read(ghv, ptr) }

  if (EnableGHistDiff) {
    val s3_predicted_ghist = WireInit(getHist(s3_predicted_ghist_ptr_dup(0)).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when(resp.s3.shouldShiftVec(0)(i)) {
        s3_predicted_ghist(i) := resp.s3.brTaken(0) && (i == 0).B
      }
    }
    when(s3_redirect_dup(0)) {
      s0_ghist := s3_predicted_ghist.asUInt
    }
  }

  val s3_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b =>
      s3_ghist_ptr_dup(0).value === (CGHPtr(false.B, n.U) + b.U).value && resp.s3.shouldShiftVec(0)(
        b
      ) && s3_redirect_dup(0)
    )
  )
  val s3_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b =>
        (
          s3_ghist_ptr_dup(0).value === (CGHPtr(false.B, n.U) + b.U).value && resp.s3.shouldShiftVec(0)(b),
          resp.s3.full_pred(0).real_br_taken_mask()(b)
        )
      )
    )
  )

  val previous_s2_pred = RegEnable(resp.s2, 0.U.asTypeOf(resp.s2), s2_fire_dup(0))

  val s3_redirect_on_br_taken_dup = resp.s3.full_pred.zip(previous_s2_pred.full_pred).map { case (fp1, fp2) =>
    fp1.real_br_taken_mask().asUInt =/= fp2.real_br_taken_mask().asUInt
  }
  val s3_both_first_taken_dup = resp.s3.full_pred.zip(previous_s2_pred.full_pred).map { case (fp1, fp2) =>
    fp1.real_br_taken_mask()(0) && fp2.real_br_taken_mask()(0)
  }
  val s3_redirect_on_target_dup = resp.s3.getTarget.zip(previous_s2_pred.getTarget).map { case (t1, t2) => t1 =/= t2 }
  val s3_redirect_on_jalr_target_dup = resp.s3.full_pred.zip(previous_s2_pred.full_pred).map { case (fp1, fp2) =>
    fp1.hit_taken_on_jalr && fp1.jalr_target =/= fp2.jalr_target
  }
  val s3_redirect_on_fall_thru_error_dup = resp.s3.fallThruError
  val s3_redirect_on_ftb_multi_hit_dup   = resp.s3.ftbMultiHit

  for (
    (
      (
        ((((s3_redirect, s3_fire), s3_redirect_on_br_taken), s3_redirect_on_target), s3_redirect_on_fall_thru_error),
        s3_redirect_on_ftb_multi_hit
      ),
      s3_both_first_taken
    ) <-
      s3_redirect_dup zip s3_fire_dup zip s3_redirect_on_br_taken_dup zip s3_redirect_on_target_dup zip s3_redirect_on_fall_thru_error_dup zip s3_redirect_on_ftb_multi_hit_dup zip s3_both_first_taken_dup
  ) {

    s3_redirect := s3_fire && (
      (s3_redirect_on_br_taken && !s3_both_first_taken) || s3_redirect_on_target || s3_redirect_on_fall_thru_error || s3_redirect_on_ftb_multi_hit
    )
  }

  XSPerfAccumulate(f"s3_redirect_on_br_taken", s3_fire_dup(0) && s3_redirect_on_br_taken_dup(0))
  XSPerfAccumulate(f"s3_redirect_on_jalr_target", s3_fire_dup(0) && s3_redirect_on_jalr_target_dup(0))
  XSPerfAccumulate(
    f"s3_redirect_on_others",
    s3_redirect_dup(0) && !(s3_redirect_on_br_taken_dup(0) || s3_redirect_on_jalr_target_dup(0))
  )

  for (((npcGen, s3_redirect), s3_target) <- npcGen_dup zip s3_redirect_dup zip resp.s3.getTarget)
    npcGen.register(s3_redirect, s3_target, Some("s3_target"), 3)
  for (((foldedGhGen, s3_redirect), s3_predicted_fh) <- foldedGhGen_dup zip s3_redirect_dup zip s3_predicted_fh_dup)
    foldedGhGen.register(s3_redirect, s3_predicted_fh, Some("s3_FGH"), 3)
  for (
    ((ghistPtrGen, s3_redirect), s3_predicted_ghist_ptr) <-
      ghistPtrGen_dup zip s3_redirect_dup zip s3_predicted_ghist_ptr_dup
  )
    ghistPtrGen.register(s3_redirect, s3_predicted_ghist_ptr, Some("s3_GHPtr"), 3)
  for (
    ((lastBrNumOHGen, s3_redirect), s3_brPosOH) <-
      lastBrNumOHGen_dup zip s3_redirect_dup zip resp.s3.lastBrPosOH.map(_.asUInt)
  )
    lastBrNumOHGen.register(s3_redirect, s3_brPosOH, Some("s3_BrNumOH"), 3)
  for (
    ((aheadFhObGen, s3_redirect), s3_ahead_fh_ob_src) <- aheadFhObGen_dup zip s3_redirect_dup zip s3_ahead_fh_ob_src_dup
  )
    aheadFhObGen.register(s3_redirect, s3_ahead_fh_ob_src, Some("s3_AFHOB"), 3)
  ghvBitWriteGens.zip(s3_ghv_wens).zipWithIndex.map { case ((b, w), i) =>
    b.register(w.reduce(_ || _), s3_ghv_wdatas(i), Some(s"s3_new_bit_$i"), 3)
  }

  // Send signals telling the FTQ to override
  val s2_ftq_idx = RegEnable(io.ftq_to_bpu.enq_ptr, s1_fire_dup(0))
  val s3_ftq_idx = RegEnable(s2_ftq_idx, s2_fire_dup(0))

  for (((to_ftq_s1_valid, s1_fire), s1_flush) <- io.bpu_to_ftq.resp.bits.s1.valid zip s1_fire_dup zip s1_flush_dup) {
    to_ftq_s1_valid := s1_fire && !s1_flush
  }
  io.bpu_to_ftq.resp.bits.s1.hasRedirect.map(_ := false.B)
  io.bpu_to_ftq.resp.bits.s1.ftq_idx := DontCare
  for (((to_ftq_s2_valid, s2_fire), s2_flush) <- io.bpu_to_ftq.resp.bits.s2.valid zip s2_fire_dup zip s2_flush_dup) {
    to_ftq_s2_valid := s2_fire && !s2_flush
  }
  io.bpu_to_ftq.resp.bits.s2.hasRedirect.zip(s2_redirect_dup).map { case (hr, r) => hr := r }
  io.bpu_to_ftq.resp.bits.s2.ftq_idx := s2_ftq_idx
  for (((to_ftq_s3_valid, s3_fire), s3_flush) <- io.bpu_to_ftq.resp.bits.s3.valid zip s3_fire_dup zip s3_flush_dup) {
    to_ftq_s3_valid := s3_fire && !s3_flush
  }
  io.bpu_to_ftq.resp.bits.s3.hasRedirect.zip(s3_redirect_dup).map { case (hr, r) => hr := r }
  io.bpu_to_ftq.resp.bits.s3.ftq_idx := s3_ftq_idx

  predictors.io.update.valid := RegNext(io.ftq_to_bpu.update.valid, init = false.B)
  predictors.io.update.bits  := RegEnable(io.ftq_to_bpu.update.bits, io.ftq_to_bpu.update.valid)
  predictors.io.update.bits.ghist := RegEnable(
    getHist(io.ftq_to_bpu.update.bits.spec_info.histPtr),
    io.ftq_to_bpu.update.valid
  )

  val redirect_dup = do_redirect_dup.map(_.bits)
  predictors.io.redirect := do_redirect_dup(0)

  // Redirect logic
  val shift_dup       = redirect_dup.map(_.cfiUpdate.shift)
  val addIntoHist_dup = redirect_dup.map(_.cfiUpdate.addIntoHist)
  // TODO: remove these below
  val shouldShiftVec_dup = shift_dup.map(shift =>
    Mux(
      shift === 0.U,
      VecInit(0.U((1 << (log2Ceil(numBr) + 1)).W).asBools),
      VecInit(LowerMask(1.U << (shift - 1.U)).asBools)
    )
  )
  // TODO end
  val afhob_dup       = redirect_dup.map(_.cfiUpdate.afhob)
  val lastBrNumOH_dup = redirect_dup.map(_.cfiUpdate.lastBrNumOH)

  val isBr_dup  = redirect_dup.map(_.cfiUpdate.pd.isBr)
  val taken_dup = redirect_dup.map(_.cfiUpdate.taken)
  val real_br_taken_mask_dup =
    for (((shift, taken), addIntoHist) <- shift_dup zip taken_dup zip addIntoHist_dup)
      yield (0 until numBr).map(i => shift === (i + 1).U && taken && addIntoHist)

  val oldPtr_dup      = redirect_dup.map(_.cfiUpdate.histPtr)
  val updated_ptr_dup = oldPtr_dup.zip(shift_dup).map { case (oldPtr, shift) => oldPtr - shift }
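  // Fold a histLen-bit history into compLen bits by XOR-ing compLen-wide
  // chunks; e.g. histLen = 8, compLen = 3 yields
  // hist(2,0) ^ hist(5,3) ^ hist(7,6).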
  def computeFoldedHist(hist: UInt, compLen: Int)(histLen: Int): UInt =
    if (histLen > 0) {
      val nChunks     = (histLen + compLen - 1) / compLen
      val hist_chunks = (0 until nChunks) map { i => hist(min((i + 1) * compLen, histLen) - 1, i * compLen) }
      ParallelXOR(hist_chunks)
    } else 0.U

  val oldFh_dup = dup_seq(WireInit(0.U.asTypeOf(new AllFoldedHistories(foldedGHistInfos))))
  oldFh_dup.zip(oldPtr_dup).map { case (oldFh, oldPtr) =>
    foldedGHistInfos.foreach { case (histLen, compLen) =>
      oldFh.getHistWithInfo((histLen, compLen)).folded_hist := computeFoldedHist(getHist(oldPtr), compLen)(histLen)
    }
  }

  val updated_fh_dup =
    for (
      ((((oldFh, oldPtr), taken), addIntoHist), shift) <-
        oldFh_dup zip oldPtr_dup zip taken_dup zip addIntoHist_dup zip shift_dup
    )
      yield VecInit((0 to numBr).map(i => oldFh.update(ghv, oldPtr, i, taken && addIntoHist)))(shift)
  val thisBrNumOH_dup   = shift_dup.map(shift => UIntToOH(shift, numBr + 1))
  val thisAheadFhOb_dup = dup_wire(new AllAheadFoldedHistoryOldestBits(foldedGHistInfos))
  thisAheadFhOb_dup.zip(oldPtr_dup).map { case (afhob, oldPtr) => afhob.read(ghv, oldPtr) }
  val redirect_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b =>
      oldPtr_dup(0).value === (CGHPtr(false.B, n.U) + b.U).value && shouldShiftVec_dup(0)(b) && do_redirect_dup(0).valid
    )
  )
  val redirect_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b => oldPtr_dup(0).value === (CGHPtr(false.B, n.U) + b.U).value && shouldShiftVec_dup(0)(b)),
      real_br_taken_mask_dup(0)
    )
  )

  if (EnableGHistDiff) {
    val updated_ghist = WireInit(getHist(updated_ptr_dup(0)).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when(shift_dup(0) >= (i + 1).U) {
        updated_ghist(i) := taken_dup(0) && addIntoHist_dup(0) && (i == 0).B
      }
    }
    when(do_redirect_dup(0).valid) {
      s0_ghist := updated_ghist.asUInt
    }
  }

  // Commit-time history checker
  if (EnableCommitGHistDiff) {
    val commitGHist    = RegInit(0.U.asTypeOf(Vec(HistoryLength, Bool())))
    val commitGHistPtr = RegInit(0.U.asTypeOf(new CGHPtr))
    def getCommitHist(ptr: CGHPtr): UInt =
      (Cat(commitGHist.asUInt, commitGHist.asUInt) >> (ptr.value + 1.U))(HistoryLength - 1, 0)

    val updateValid:         Bool      = io.ftq_to_bpu.update.valid
    val branchValidMask:     UInt      = io.ftq_to_bpu.update.bits.ftb_entry.brValids.asUInt
    val branchCommittedMask: Vec[Bool] = io.ftq_to_bpu.update.bits.br_committed
    val misPredictMask:      UInt      = io.ftq_to_bpu.update.bits.mispred_mask.asUInt
    val takenMask: UInt =
      io.ftq_to_bpu.update.bits.br_taken_mask.asUInt |
        io.ftq_to_bpu.update.bits.ftb_entry.always_taken.asUInt // Always-taken branches are recorded in the history
    val takenIdx:      UInt = (PriorityEncoder(takenMask) + 1.U((log2Ceil(numBr) + 1).W)).asUInt
    val misPredictIdx: UInt = (PriorityEncoder(misPredictMask) + 1.U((log2Ceil(numBr) + 1).W)).asUInt
    val shouldShiftMask: UInt = Mux(takenMask.orR, LowerMask(takenIdx).asUInt, ((1 << numBr) - 1).asUInt) &
      Mux(misPredictMask.orR, LowerMask(misPredictIdx).asUInt, ((1 << numBr) - 1).asUInt) &
      branchCommittedMask.asUInt
    val updateShift: UInt =
      Mux(updateValid && branchValidMask.orR, PopCount(branchValidMask & shouldShiftMask), 0.U)

    // Maintain the commitGHist
    for (i <- 0 until numBr) {
      when(updateShift >= (i + 1).U) {
        val ptr: CGHPtr = commitGHistPtr - i.asUInt
        commitGHist(ptr.value) := takenMask(i)
      }
    }
    when(updateValid) {
      commitGHistPtr := commitGHistPtr - updateShift
    }

    // Compute the true (commit-time) folded history via parallel XOR and
    // check it against the predict-time folded history (differential check)
    TageTableInfos.map {
      case (nRows, histLen, _) => {
        val nRowsPerBr      = nRows / numBr
        val predictGHistPtr = io.ftq_to_bpu.update.bits.spec_info.histPtr
        val commitTrueHist: UInt = computeFoldedHist(getCommitHist(commitGHistPtr), log2Ceil(nRowsPerBr))(histLen)
        val predictFHist:   UInt = computeFoldedHist(getHist(predictGHistPtr), log2Ceil(nRowsPerBr))(histLen)
        XSWarn(
          updateValid && predictFHist =/= commitTrueHist,
          p"predict time ghist: ${predictFHist} is different from commit time: ${commitTrueHist}\n"
        )
      }
    }
  }

  // val updatedGh = oldGh.update(shift, taken && addIntoHist)
  for ((npcGen, do_redirect) <- npcGen_dup zip do_redirect_dup)
    npcGen.register(do_redirect.valid, do_redirect.bits.cfiUpdate.target, Some("redirect_target"), 2)
  for (((foldedGhGen, do_redirect), updated_fh) <- foldedGhGen_dup zip do_redirect_dup zip updated_fh_dup)
    foldedGhGen.register(do_redirect.valid, updated_fh, Some("redirect_FGHT"), 2)
  for (((ghistPtrGen, do_redirect), updated_ptr) <- ghistPtrGen_dup zip do_redirect_dup zip updated_ptr_dup)
    ghistPtrGen.register(do_redirect.valid, updated_ptr, Some("redirect_GHPtr"), 2)
  for (((lastBrNumOHGen, do_redirect), thisBrNumOH) <- lastBrNumOHGen_dup zip do_redirect_dup zip thisBrNumOH_dup)
    lastBrNumOHGen.register(do_redirect.valid, thisBrNumOH, Some("redirect_BrNumOH"), 2)
  for (((aheadFhObGen, do_redirect), thisAheadFhOb) <- aheadFhObGen_dup zip do_redirect_dup zip thisAheadFhOb_dup)
    aheadFhObGen.register(do_redirect.valid, thisAheadFhOb, Some("redirect_AFHOB"), 2)
  ghvBitWriteGens.zip(redirect_ghv_wens).zipWithIndex.map { case ((b, w), i) =>
    b.register(w.reduce(_ || _), redirect_ghv_wdatas(i), Some(s"redirect_new_bit_$i"), 2)
  }
  // no need to assign s0_last_pred

  // val need_reset = RegNext(reset.asBool) && !reset.asBool

  // Reset
  // npcGen.register(need_reset, resetVector.U, Some("reset_pc"), 1)
  // foldedGhGen.register(need_reset, 0.U.asTypeOf(s0_folded_gh), Some("reset_FGH"), 1)
  // ghistPtrGen.register(need_reset, 0.U.asTypeOf(new CGHPtr), Some("reset_GHPtr"), 1)

  s0_pc_dup.zip(npcGen_dup).map { case (s0_pc, npcGen) => s0_pc := npcGen() }
  s0_folded_gh_dup.zip(foldedGhGen_dup).map { case (s0_folded_gh, foldedGhGen) => s0_folded_gh := foldedGhGen() }
  s0_ghist_ptr_dup.zip(ghistPtrGen_dup).map { case (s0_ghist_ptr, ghistPtrGen) => s0_ghist_ptr := ghistPtrGen() }
  s0_ahead_fh_oldest_bits_dup.zip(aheadFhObGen_dup).map { case (s0_ahead_fh_oldest_bits, aheadFhObGen) =>
    s0_ahead_fh_oldest_bits := aheadFhObGen()
  }
  s0_last_br_num_oh_dup.zip(lastBrNumOHGen_dup).map { case (s0_last_br_num_oh, lastBrNumOHGen) =>
    s0_last_br_num_oh := lastBrNumOHGen()
  }
  (ghv_write_datas zip ghvBitWriteGens).map { case (wd, d) => wd := d() }
  for (i <- 0 until HistoryLength) {
    ghv_wens(i) := Seq(s1_ghv_wens, s2_ghv_wens, s3_ghv_wens, redirect_ghv_wens).map(_(i).reduce(_ || _)).reduce(_ || _)
    when(ghv_wens(i)) {
      ghv(i) := ghv_write_datas(i)
    }
  }

  // TODO: signals for memVio and other Redirects
  controlRedirectBubble := do_redirect_dup(0).valid && do_redirect_dup(0).bits.ControlRedirectBubble
  ControlBTBMissBubble  := do_redirect_dup(0).bits.ControlBTBMissBubble
  TAGEMissBubble        := do_redirect_dup(0).bits.TAGEMissBubble
  SCMissBubble          := do_redirect_dup(0).bits.SCMissBubble
  ITTAGEMissBubble      := do_redirect_dup(0).bits.ITTAGEMissBubble
  RASMissBubble         := do_redirect_dup(0).bits.RASMissBubble

  memVioRedirectBubble                 := do_redirect_dup(0).valid && do_redirect_dup(0).bits.MemVioRedirectBubble
  otherRedirectBubble                  := do_redirect_dup(0).valid && do_redirect_dup(0).bits.OtherRedirectBubble
  btbMissBubble                        := do_redirect_dup(0).valid && do_redirect_dup(0).bits.BTBMissBubble
  overrideBubble(0)                    := s2_redirect_dup(0)
  overrideBubble(1)                    := s3_redirect_dup(0)
  ftqUpdateBubble(0)                   := !s1_components_ready_dup(0)
  ftqUpdateBubble(1)                   := !s2_components_ready_dup(0)
  ftqUpdateBubble(2)                   := !s3_components_ready_dup(0)
  ftqFullStall                         := !io.bpu_to_ftq.resp.ready
  io.bpu_to_ftq.resp.bits.topdown_info := topdown_stages(numOfStage - 1)

  // topdown handling logic here
  when(controlRedirectBubble) {
    /*
    for (i <- 0 until numOfStage)
      topdown_stages(i).reasons(TopDownCounters.ControlRedirectBubble.id) := true.B
    io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.ControlRedirectBubble.id) := true.B
     */
    when(ControlBTBMissBubble) {
      for (i <- 0 until numOfStage)
        topdown_stages(i).reasons(TopDownCounters.BTBMissBubble.id)                  := true.B
      io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.BTBMissBubble.id) := true.B
    }.elsewhen(TAGEMissBubble) {
      for (i <- 0 until numOfStage)
        topdown_stages(i).reasons(TopDownCounters.TAGEMissBubble.id)                  := true.B
      io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.TAGEMissBubble.id) := true.B
    }.elsewhen(SCMissBubble) {
      for (i <- 0 until numOfStage)
        topdown_stages(i).reasons(TopDownCounters.SCMissBubble.id)                  := true.B
      io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.SCMissBubble.id) := true.B
    }.elsewhen(ITTAGEMissBubble) {
      for (i <- 0 until numOfStage)
        topdown_stages(i).reasons(TopDownCounters.ITTAGEMissBubble.id)                  := true.B
      io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.ITTAGEMissBubble.id) := true.B
    }.elsewhen(RASMissBubble) {
      for (i <- 0 until numOfStage)
        topdown_stages(i).reasons(TopDownCounters.RASMissBubble.id)                  := true.B
      io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.RASMissBubble.id) := true.B
    }
  }
  when(memVioRedirectBubble) {
    for (i <- 0 until numOfStage)
      topdown_stages(i).reasons(TopDownCounters.MemVioRedirectBubble.id)                  := true.B
    io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.MemVioRedirectBubble.id) := true.B
  }
  when(otherRedirectBubble) {
    for (i <- 0 until numOfStage)
      topdown_stages(i).reasons(TopDownCounters.OtherRedirectBubble.id)                  := true.B
    io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.OtherRedirectBubble.id) := true.B
  }
  when(btbMissBubble) {
    for (i <- 0 until numOfStage)
      topdown_stages(i).reasons(TopDownCounters.BTBMissBubble.id)                  := true.B
    io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.BTBMissBubble.id) := true.B
  }

  for (i <- 0 until numOfStage) {
    if (i < numOfStage - overrideStage) {
      when(overrideBubble(i)) {
        for (j <- 0 to i)
          topdown_stages(j).reasons(TopDownCounters.OverrideBubble.id) := true.B
      }
    }
    if (i < numOfStage - ftqUpdateStage) {
      when(ftqUpdateBubble(i)) {
        topdown_stages(i).reasons(TopDownCounters.FtqUpdateBubble.id) := true.B
      }
    }
  }
  when(ftqFullStall) {
    topdown_stages(0).reasons(TopDownCounters.FtqFullStall.id) := true.B
  }

  XSError(
    isBefore(redirect_dup(0).cfiUpdate.histPtr, s3_ghist_ptr_dup(0)) && do_redirect_dup(0).valid,
    p"s3_ghist_ptr ${s3_ghist_ptr_dup(0)} exceeds redirect histPtr ${redirect_dup(0).cfiUpdate.histPtr}\n"
  )
  XSError(
    isBefore(redirect_dup(0).cfiUpdate.histPtr, s2_ghist_ptr_dup(0)) && do_redirect_dup(0).valid,
    p"s2_ghist_ptr ${s2_ghist_ptr_dup(0)} exceeds redirect histPtr ${redirect_dup(0).cfiUpdate.histPtr}\n"
  )
  XSError(
    isBefore(redirect_dup(0).cfiUpdate.histPtr, s1_ghist_ptr_dup(0)) && do_redirect_dup(0).valid,
    p"s1_ghist_ptr ${s1_ghist_ptr_dup(0)} exceeds redirect histPtr ${redirect_dup(0).cfiUpdate.histPtr}\n"
  )

  XSDebug(RegNext(reset.asBool) && !reset.asBool, "Resetting...\n")
  XSDebug(io.ftq_to_bpu.update.valid, p"Update from ftq\n")
  XSDebug(io.ftq_to_bpu.redirect.valid, p"Redirect from ftq\n")

  XSDebug("[BP0]                 fire=%d                      pc=%x\n", s0_fire_dup(0), s0_pc_dup(0))
  XSDebug(
    "[BP1] v=%d r=%d cr=%d fire=%d             flush=%d pc=%x\n",
    s1_valid_dup(0),
    s1_ready_dup(0),
    s1_components_ready_dup(0),
    s1_fire_dup(0),
    s1_flush_dup(0),
    s1_pc
  )
  XSDebug(
    "[BP2] v=%d r=%d cr=%d fire=%d redirect=%d flush=%d pc=%x\n",
    s2_valid_dup(0),
    s2_ready_dup(0),
    s2_components_ready_dup(0),
    s2_fire_dup(0),
    s2_redirect_dup(0),
    s2_flush_dup(0),
    s2_pc
  )
  XSDebug(
    "[BP3] v=%d r=%d cr=%d fire=%d redirect=%d flush=%d pc=%x\n",
    s3_valid_dup(0),
    s3_ready_dup(0),
    s3_components_ready_dup(0),
    s3_fire_dup(0),
    s3_redirect_dup(0),
    s3_flush_dup(0),
    s3_pc
  )
  XSDebug("[FTQ] ready=%d\n", io.bpu_to_ftq.resp.ready)
  XSDebug("resp.s1.target=%x\n", resp.s1.getTarget(0))
  XSDebug("resp.s2.target=%x\n", resp.s2.getTarget(0))
  // XSDebug("s0_ghist: %b\n", s0_ghist.predHist)
  // XSDebug("s1_ghist: %b\n", s1_ghist.predHist)
  // XSDebug("s2_ghist: %b\n", s2_ghist.predHist)
  // XSDebug("s2_predicted_ghist: %b\n", s2_predicted_ghist.predHist)
  XSDebug(p"s0_ghist_ptr: ${s0_ghist_ptr_dup(0)}\n")
  XSDebug(p"s1_ghist_ptr: ${s1_ghist_ptr_dup(0)}\n")
  XSDebug(p"s2_ghist_ptr: ${s2_ghist_ptr_dup(0)}\n")
  XSDebug(p"s3_ghist_ptr: ${s3_ghist_ptr_dup(0)}\n")

  io.ftq_to_bpu.update.bits.display(io.ftq_to_bpu.update.valid)
  io.ftq_to_bpu.redirect.bits.display(io.ftq_to_bpu.redirect.valid)

  XSPerfAccumulate("s2_redirect", s2_redirect_dup(0))
  XSPerfAccumulate("s3_redirect", s3_redirect_dup(0))
  XSPerfAccumulate("s1_not_valid", !s1_valid_dup(0))

  val perfEvents = predictors.asInstanceOf[Composer].getPerfEvents
  generatePerfEvent()
}