/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.frontend

import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import scala.math.min
import utility._
import xiangshan._

trait HasBPUConst extends HasXSParameter {
  val MaxMetaBaseLength = if (!env.FPGAPlatform) 512 else 256 // TODO: Reduce meta length
  val MaxMetaLength     = if (HasHExtension) MaxMetaBaseLength + 4 else MaxMetaBaseLength
  val MaxBasicBlockSize = 32
  val LHistoryLength    = 32
  // val numBr = 2
  val useBPD    = true
  val useLHist  = true
  val numBrSlot = numBr - 1
  val totalSlot = numBrSlot + 1

  val numDup = 4

  // Used to gate PC higher parts
  val pcSegments = Seq(VAddrBits - 24, 12, 12)

  def BP_STAGES = (0 until 3).map(_.U(2.W))
  def BP_S1     = BP_STAGES(0)
  def BP_S2     = BP_STAGES(1)
  def BP_S3     = BP_STAGES(2)

  def dup_seq[T](src:          T, num: Int = numDup) = Seq.tabulate(num)(n => src)
  def dup[T <: Data](src:      T, num: Int = numDup) = VecInit(Seq.tabulate(num)(n => src))
  def dup_wire[T <: Data](src: T, num: Int = numDup) = Wire(Vec(num, src.cloneType))
  def dup_idx     = Seq.tabulate(numDup)(n => n.toString())
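  // Editor's sketch (not in the original source): the dup_* helpers fan one
  // logical signal out into numDup copies so physically distant consumers can
  // each be driven by their own wire, e.g.
  //   val fire      = Wire(Bool())
  //   val fire_dup  = dup(fire)        // Vec(numDup, Bool()), every element reads fire
  //   val fire_wire = dup_wire(Bool()) // an undriven Vec(numDup, Bool()) of the same shape
  //   fire_wire.foreach(_ := fire)     // drive each copy explicitly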
  val numBpStages = BP_STAGES.length

  val debug = true
  // TODO: Replace log2Up by log2Ceil
}

trait HasBPUParameter extends HasXSParameter with HasBPUConst {
  val BPUDebug            = true && !env.FPGAPlatform && env.EnablePerfDebug
  val EnableCFICommitLog  = true
  val EnableCFIPredLog    = true
  val EnableBPUTimeRecord = (EnableCFICommitLog || EnableCFIPredLog) && !env.FPGAPlatform
  val EnableCommit        = false
}

class BPUCtrl(implicit p: Parameters) extends XSBundle {
  val ubtb_enable = Bool()
  val btb_enable  = Bool()
  val bim_enable  = Bool()
  val tage_enable = Bool()
  val sc_enable   = Bool()
  val ras_enable  = Bool()
  val loop_enable = Bool()
}

trait BPUUtils extends HasXSParameter {
  // circular shifting
  def circularShiftLeft(source: UInt, len: Int, shamt: UInt): UInt = {
    val res    = Wire(UInt(len.W))
    val higher = source << shamt
    val lower  = source >> (len.U - shamt)
    res := higher | lower
    res
  }

  def circularShiftRight(source: UInt, len: Int, shamt: UInt): UInt = {
    val res    = Wire(UInt(len.W))
    val higher = source << (len.U - shamt)
    val lower  = source >> shamt
    res := higher | lower
    res
  }
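  // Worked example (editor's sketch, not in the original source): with
  // len = 4 and source = b0011,
  //   circularShiftLeft(source, 4, 1.U) yields b0110
  //   circularShiftLeft(source, 4, 3.U) yields b1001 (the set bits wrap around)
  // and circularShiftRight is its inverse: circularShiftRight(b0110, 4, 1.U)
  // yields b0011 again.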

  // To be verified
  def satUpdate(old: UInt, len: Int, taken: Bool): UInt = {
    val oldSatTaken    = old === ((1 << len) - 1).U
    val oldSatNotTaken = old === 0.U
    Mux(oldSatTaken && taken, ((1 << len) - 1).U, Mux(oldSatNotTaken && !taken, 0.U, Mux(taken, old + 1.U, old - 1.U)))
  }

  def signedSatUpdate(old: SInt, len: Int, taken: Bool): SInt = {
    val oldSatTaken    = old === ((1 << (len - 1)) - 1).S
    val oldSatNotTaken = old === (-(1 << (len - 1))).S
    Mux(
      oldSatTaken && taken,
      ((1 << (len - 1)) - 1).S,
      Mux(oldSatNotTaken && !taken, (-(1 << (len - 1))).S, Mux(taken, old + 1.S, old - 1.S))
    )
  }
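  // Worked example (editor's sketch, not in the original source): for a
  // 2-bit counter (len = 2) the states are 0..3, so
  //   satUpdate(1.U, 2, taken = true.B)  yields 2 (increment)
  //   satUpdate(3.U, 2, taken = true.B)  yields 3 (saturated high)
  //   satUpdate(0.U, 2, taken = false.B) yields 0 (saturated low)
  // signedSatUpdate behaves the same but saturates at (1 << (len - 1)) - 1
  // and -(1 << (len - 1)), i.e. +3 and -4 for len = 3.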

  def getFallThroughAddr(start: UInt, carry: Bool, pft: UInt) = {
    val higher = start.head(VAddrBits - log2Ceil(PredictWidth) - instOffsetBits)
    Cat(Mux(carry, higher + 1.U, higher), pft, 0.U(instOffsetBits.W))
  }
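  // Editor's note (sketch, not in the original source): the fall-through
  // address is rebuilt from the upper bits of the start PC, a stored partial
  // fall-through offset pft, and a carry marking that the block crosses the
  // upper-bit boundary, conceptually
  //   fallThrough = Cat(higher + carry, pft, 0.U(instOffsetBits.W))
  // so only the low offset bits need to be kept per FTB entry.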

  def foldTag(tag: UInt, l: Int): UInt = {
    val nChunks = (tag.getWidth + l - 1) / l
    val chunks  = (0 until nChunks).map(i => tag(min((i + 1) * l, tag.getWidth) - 1, i * l))
    ParallelXOR(chunks)
  }
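  // Worked example (editor's sketch, not in the original source): folding a
  // 10-bit tag down to l = 4 bits XORs three chunks,
  //   foldTag(tag, 4) = tag(3, 0) ^ tag(7, 4) ^ tag(9, 8)
  // where the narrower last chunk is zero-extended by the XOR reduction.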
}

class BasePredictorInput(implicit p: Parameters) extends XSBundle with HasBPUConst {
  def nInputs = 1

  val s0_pc = Vec(numDup, UInt(VAddrBits.W))

  val folded_hist    = Vec(numDup, new AllFoldedHistories(foldedGHistInfos))
  val s1_folded_hist = Vec(numDup, new AllFoldedHistories(foldedGHistInfos))
  val ghist          = UInt(HistoryLength.W)

  val resp_in = Vec(nInputs, new BranchPredictionResp)

  // val final_preds = Vec(numBpStages, new)
  // val toFtq_fire = Bool()

  // val s0_all_ready = Bool()
}

class BasePredictorOutput(implicit p: Parameters) extends BranchPredictionResp {}

class BasePredictorIO(implicit p: Parameters) extends XSBundle with HasBPUConst {
  val reset_vector = Input(UInt(PAddrBits.W))
  val in           = Flipped(DecoupledIO(new BasePredictorInput)) // TODO: Remove DecoupledIO
  // val out = DecoupledIO(new BasePredictorOutput)
  val out = Output(new BasePredictorOutput)
  // val flush_out = Valid(UInt(VAddrBits.W))

  val fauftb_entry_in      = Input(new FTBEntry)
  val fauftb_entry_hit_in  = Input(Bool())
  val fauftb_entry_out     = Output(new FTBEntry)
  val fauftb_entry_hit_out = Output(Bool())

  val ctrl = Input(new BPUCtrl)

  val s0_fire = Input(Vec(numDup, Bool()))
  val s1_fire = Input(Vec(numDup, Bool()))
  val s2_fire = Input(Vec(numDup, Bool()))
  val s3_fire = Input(Vec(numDup, Bool()))

  val s2_redirect = Input(Vec(numDup, Bool()))
  val s3_redirect = Input(Vec(numDup, Bool()))

  val s1_ready = Output(Bool())
  val s2_ready = Output(Bool())
  val s3_ready = Output(Bool())

  val update          = Flipped(Valid(new BranchPredictionUpdate))
  val redirect        = Flipped(Valid(new BranchPredictionRedirect))
  val redirectFromIFU = Input(Bool())
}

abstract class BasePredictor(implicit p: Parameters) extends XSModule
    with HasBPUConst with BPUUtils with HasPerfEvents {
  val meta_size      = 0
  val spec_meta_size = 0
  val is_fast_pred   = false
  val io             = IO(new BasePredictorIO())

  io.out := io.in.bits.resp_in(0)

  io.fauftb_entry_out     := io.fauftb_entry_in
  io.fauftb_entry_hit_out := io.fauftb_entry_hit_in

  io.out.last_stage_meta := 0.U

  io.in.ready := !io.redirect.valid

  io.s1_ready := true.B
  io.s2_ready := true.B
  io.s3_ready := true.B

  val s0_pc_dup = WireInit(io.in.bits.s0_pc) // fetchIdx(io.f0_pc)
  val s1_pc_dup = s0_pc_dup.zip(io.s0_fire).map { case (s0_pc, s0_fire) => RegEnable(s0_pc, s0_fire) }
  val s2_pc_dup = s1_pc_dup.zip(io.s1_fire).map { case (s1_pc, s1_fire) =>
    SegmentedAddrNext(s1_pc, pcSegments, s1_fire, Some("s2_pc"))
  }
  val s3_pc_dup = s2_pc_dup.zip(io.s2_fire).map { case (s2_pc, s2_fire) =>
    SegmentedAddrNext(s2_pc, s2_fire, Some("s3_pc"))
  }

  when(RegNext(RegNext(reset.asBool) && !reset.asBool)) {
    s1_pc_dup.map { case s1_pc => s1_pc := io.reset_vector }
  }

  io.out.s1.pc := s1_pc_dup
  io.out.s2.pc := s2_pc_dup.map(_.getAddr())
  io.out.s3.pc := s3_pc_dup.map(_.getAddr())

  val perfEvents: Seq[(String, UInt)] = Seq()

  def getFoldedHistoryInfo: Option[Set[FoldedHistoryInfo]] = None
}

class FakePredictor(implicit p: Parameters) extends BasePredictor {
  io.in.ready            := true.B
  io.out.last_stage_meta := 0.U
  io.out                 := io.in.bits.resp_in(0)
}

class BpuToFtqIO(implicit p: Parameters) extends XSBundle {
  val resp = DecoupledIO(new BpuToFtqBundle())
}

class PredictorIO(implicit p: Parameters) extends XSBundle {
  val bpu_to_ftq   = new BpuToFtqIO()
  val ftq_to_bpu   = Flipped(new FtqToBpuIO)
  val ctrl         = Input(new BPUCtrl)
  val reset_vector = Input(UInt(PAddrBits.W))
}

class Predictor(implicit p: Parameters) extends XSModule with HasBPUConst with HasPerfEvents
    with HasCircularQueuePtrHelper {
  val io = IO(new PredictorIO)

  val ctrl       = DelayN(io.ctrl, 1)
  val predictors = Module(if (useBPD) new Composer else new FakePredictor)

  def numOfStage = 3
  require(numOfStage > 1, "BPU numOfStage must be greater than 1")
  val topdown_stages = RegInit(VecInit(Seq.fill(numOfStage)(0.U.asTypeOf(new FrontendTopDownBundle))))

  // following can only happen on s1
  val controlRedirectBubble = Wire(Bool())
  val ControlBTBMissBubble  = Wire(Bool())
  val TAGEMissBubble        = Wire(Bool())
  val SCMissBubble          = Wire(Bool())
  val ITTAGEMissBubble      = Wire(Bool())
  val RASMissBubble         = Wire(Bool())

  val memVioRedirectBubble = Wire(Bool())
  val otherRedirectBubble  = Wire(Bool())
  val btbMissBubble        = Wire(Bool())
  otherRedirectBubble  := false.B
  memVioRedirectBubble := false.B

  // override can happen between s1-s2 and s2-s3
  val overrideBubble = Wire(Vec(numOfStage - 1, Bool()))
  def overrideStage  = 1
  // ftq update block can happen on s1, s2 and s3
  val ftqUpdateBubble = Wire(Vec(numOfStage, Bool()))
  def ftqUpdateStage  = 0
  // ftq full stall only happens on s3 (last stage)
  val ftqFullStall = Wire(Bool())

  // by default, no bubble event
  topdown_stages(0) := 0.U.asTypeOf(new FrontendTopDownBundle)
  // event movement driven by clock only
  for (i <- 0 until numOfStage - 1) {
    topdown_stages(i + 1) := topdown_stages(i)
  }

  // ctrl signal
  predictors.io.ctrl         := ctrl
  predictors.io.reset_vector := io.reset_vector

  val s0_stall_dup = dup_wire(Bool()) // s0 stalled for some reason, usually because FTQ is full
  val s0_fire_dup, s1_fire_dup, s2_fire_dup, s3_fire_dup                        = dup_wire(Bool())
  val s1_valid_dup, s2_valid_dup, s3_valid_dup                                  = dup_seq(RegInit(false.B))
  val s1_ready_dup, s2_ready_dup, s3_ready_dup                                  = dup_wire(Bool())
  val s1_components_ready_dup, s2_components_ready_dup, s3_components_ready_dup = dup_wire(Bool())

  val s0_pc_dup     = dup(WireInit(0.U.asTypeOf(UInt(VAddrBits.W))))
  val s0_pc_reg_dup = s0_pc_dup.zip(s0_stall_dup).map { case (s0_pc, s0_stall) => RegEnable(s0_pc, !s0_stall) }
  when(RegNext(RegNext(reset.asBool) && !reset.asBool)) {
    s0_pc_reg_dup.map { case s0_pc => s0_pc := io.reset_vector }
  }
  val s1_pc = RegEnable(s0_pc_dup(0), s0_fire_dup(0))
  val s2_pc = RegEnable(s1_pc, s1_fire_dup(0))
  val s3_pc = RegEnable(s2_pc, s2_fire_dup(0))

  val s0_folded_gh_dup = dup_wire(new AllFoldedHistories(foldedGHistInfos))
  val s0_folded_gh_reg_dup = s0_folded_gh_dup.zip(s0_stall_dup).map {
    case (x, s0_stall) => RegEnable(x, 0.U.asTypeOf(s0_folded_gh_dup(0)), !s0_stall)
  }
  val s1_folded_gh_dup = RegEnable(s0_folded_gh_dup, 0.U.asTypeOf(s0_folded_gh_dup), s0_fire_dup(1))
  val s2_folded_gh_dup = RegEnable(s1_folded_gh_dup, 0.U.asTypeOf(s0_folded_gh_dup), s1_fire_dup(1))
  val s3_folded_gh_dup = RegEnable(s2_folded_gh_dup, 0.U.asTypeOf(s0_folded_gh_dup), s2_fire_dup(1))

  val s0_last_br_num_oh_dup = dup_wire(UInt((numBr + 1).W))
  val s0_last_br_num_oh_reg_dup = s0_last_br_num_oh_dup.zip(s0_stall_dup).map {
    case (x, s0_stall) => RegEnable(x, 0.U, !s0_stall)
  }
  val s1_last_br_num_oh_dup = RegEnable(s0_last_br_num_oh_dup, 0.U.asTypeOf(s0_last_br_num_oh_dup), s0_fire_dup(1))
  val s2_last_br_num_oh_dup = RegEnable(s1_last_br_num_oh_dup, 0.U.asTypeOf(s0_last_br_num_oh_dup), s1_fire_dup(1))
  val s3_last_br_num_oh_dup = RegEnable(s2_last_br_num_oh_dup, 0.U.asTypeOf(s0_last_br_num_oh_dup), s2_fire_dup(1))

  val s0_ahead_fh_oldest_bits_dup = dup_wire(new AllAheadFoldedHistoryOldestBits(foldedGHistInfos))
  val s0_ahead_fh_oldest_bits_reg_dup = s0_ahead_fh_oldest_bits_dup.zip(s0_stall_dup).map {
    case (x, s0_stall) => RegEnable(x, 0.U.asTypeOf(s0_ahead_fh_oldest_bits_dup(0)), !s0_stall)
  }
  val s1_ahead_fh_oldest_bits_dup =
    RegEnable(s0_ahead_fh_oldest_bits_dup, 0.U.asTypeOf(s0_ahead_fh_oldest_bits_dup), s0_fire_dup(1))
  val s2_ahead_fh_oldest_bits_dup =
    RegEnable(s1_ahead_fh_oldest_bits_dup, 0.U.asTypeOf(s0_ahead_fh_oldest_bits_dup), s1_fire_dup(1))
  val s3_ahead_fh_oldest_bits_dup =
    RegEnable(s2_ahead_fh_oldest_bits_dup, 0.U.asTypeOf(s0_ahead_fh_oldest_bits_dup), s2_fire_dup(1))

  val npcGen_dup         = Seq.tabulate(numDup)(n => new PhyPriorityMuxGenerator[UInt])
  val foldedGhGen_dup    = Seq.tabulate(numDup)(n => new PhyPriorityMuxGenerator[AllFoldedHistories])
  val ghistPtrGen_dup    = Seq.tabulate(numDup)(n => new PhyPriorityMuxGenerator[CGHPtr])
  val lastBrNumOHGen_dup = Seq.tabulate(numDup)(n => new PhyPriorityMuxGenerator[UInt])
  val aheadFhObGen_dup   = Seq.tabulate(numDup)(n => new PhyPriorityMuxGenerator[AllAheadFoldedHistoryOldestBits])

  val ghvBitWriteGens = Seq.tabulate(HistoryLength)(n => new PhyPriorityMuxGenerator[Bool])
  // val ghistGen = new PhyPriorityMuxGenerator[UInt]

  val ghv      = RegInit(0.U.asTypeOf(Vec(HistoryLength, Bool())))
  val ghv_wire = WireInit(ghv)

  val s0_ghist = WireInit(0.U.asTypeOf(UInt(HistoryLength.W)))

  println(f"history buffer length ${HistoryLength}")
  val ghv_write_datas = Wire(Vec(HistoryLength, Bool()))
  val ghv_wens        = Wire(Vec(HistoryLength, Bool()))

  val s0_ghist_ptr_dup = dup_wire(new CGHPtr)
  val s0_ghist_ptr_reg_dup = s0_ghist_ptr_dup.zip(s0_stall_dup).map {
    case (x, s0_stall) => RegEnable(x, 0.U.asTypeOf(new CGHPtr), !s0_stall)
  }
  val s1_ghist_ptr_dup = RegEnable(s0_ghist_ptr_dup, 0.U.asTypeOf(s0_ghist_ptr_dup), s0_fire_dup(1))
  val s2_ghist_ptr_dup = RegEnable(s1_ghist_ptr_dup, 0.U.asTypeOf(s0_ghist_ptr_dup), s1_fire_dup(1))
  val s3_ghist_ptr_dup = RegEnable(s2_ghist_ptr_dup, 0.U.asTypeOf(s0_ghist_ptr_dup), s2_fire_dup(1))

  def getHist(ptr: CGHPtr): UInt = (Cat(ghv_wire.asUInt, ghv_wire.asUInt) >> (ptr.value + 1.U))(HistoryLength - 1, 0)
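  // Editor's note (sketch, not in the original source): ghv is a circular
  // buffer of HistoryLength outcome bits indexed by a CGHPtr. Duplicating the
  // buffer with Cat and shifting by (ptr + 1) extracts the HistoryLength-bit
  // window that starts just past the pointer, so the read also works when the
  // window wraps around the end of the buffer.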
  s0_ghist := getHist(s0_ghist_ptr_dup(0))

  val resp = predictors.io.out

  val toFtq_fire = io.bpu_to_ftq.resp.valid && io.bpu_to_ftq.resp.ready

  val s1_flush_dup, s2_flush_dup, s3_flush_dup = dup_wire(Bool())
  val s2_redirect_dup, s3_redirect_dup         = dup_wire(Bool())

  // predictors.io := DontCare
  predictors.io.in.valid               := s0_fire_dup(0)
  predictors.io.in.bits.s0_pc          := s0_pc_dup
  predictors.io.in.bits.ghist          := s0_ghist
  predictors.io.in.bits.folded_hist    := s0_folded_gh_dup
  predictors.io.in.bits.s1_folded_hist := s1_folded_gh_dup
  predictors.io.in.bits.resp_in(0)     := 0.U.asTypeOf(new BranchPredictionResp)
  predictors.io.fauftb_entry_in        := 0.U.asTypeOf(new FTBEntry)
  predictors.io.fauftb_entry_hit_in    := false.B
  predictors.io.redirectFromIFU        := RegNext(io.ftq_to_bpu.redirctFromIFU, init = false.B)
  // predictors.io.in.bits.resp_in(0).s1.pc := s0_pc
  // predictors.io.in.bits.toFtq_fire := toFtq_fire

  // predictors.io.out.ready := io.bpu_to_ftq.resp.ready

  val redirect_req    = io.ftq_to_bpu.redirect
  val do_redirect_dup = dup_seq(RegNextWithEnable(redirect_req))

  // Pipeline logic
  s2_redirect_dup.map(_ := false.B)
  s3_redirect_dup.map(_ := false.B)

  s3_flush_dup.map(_ := redirect_req.valid) // flush when redirect comes
  for (((s2_flush, s3_flush), s3_redirect) <- s2_flush_dup zip s3_flush_dup zip s3_redirect_dup)
    s2_flush := s3_flush || s3_redirect
  for (((s1_flush, s2_flush), s2_redirect) <- s1_flush_dup zip s2_flush_dup zip s2_redirect_dup)
    s1_flush := s2_flush || s2_redirect

  s1_components_ready_dup.map(_ := predictors.io.s1_ready)
  for (((s1_ready, s1_fire), s1_valid) <- s1_ready_dup zip s1_fire_dup zip s1_valid_dup)
    s1_ready := s1_fire || !s1_valid
  for (((s0_fire, s1_components_ready), s1_ready) <- s0_fire_dup zip s1_components_ready_dup zip s1_ready_dup)
    s0_fire             := s1_components_ready && s1_ready
  predictors.io.s0_fire := s0_fire_dup

  s2_components_ready_dup.map(_ := predictors.io.s2_ready)
  for (((s2_ready, s2_fire), s2_valid) <- s2_ready_dup zip s2_fire_dup zip s2_valid_dup)
    s2_ready := s2_fire || !s2_valid
  for (
    (((s1_fire, s2_components_ready), s2_ready), s1_valid) <-
      s1_fire_dup zip s2_components_ready_dup zip s2_ready_dup zip s1_valid_dup
  )
    s1_fire := s1_valid && s2_components_ready && s2_ready && io.bpu_to_ftq.resp.ready

  s3_components_ready_dup.map(_ := predictors.io.s3_ready)
  for (((s3_ready, s3_fire), s3_valid) <- s3_ready_dup zip s3_fire_dup zip s3_valid_dup)
    s3_ready := s3_fire || !s3_valid
  for (
    (((s2_fire, s3_components_ready), s3_ready), s2_valid) <-
      s2_fire_dup zip s3_components_ready_dup zip s3_ready_dup zip s2_valid_dup
  )
    s2_fire := s2_valid && s3_components_ready && s3_ready

  for ((((s0_fire, s1_flush), s1_fire), s1_valid) <- s0_fire_dup zip s1_flush_dup zip s1_fire_dup zip s1_valid_dup) {
    when(redirect_req.valid)(s1_valid := false.B)
      .elsewhen(s0_fire)(s1_valid := true.B)
      .elsewhen(s1_flush)(s1_valid := false.B)
      .elsewhen(s1_fire)(s1_valid := false.B)
  }
  predictors.io.s1_fire := s1_fire_dup

  s2_fire_dup := s2_valid_dup

  for (
    ((((s1_fire, s2_flush), s2_fire), s2_valid), s1_flush) <-
      s1_fire_dup zip s2_flush_dup zip s2_fire_dup zip s2_valid_dup zip s1_flush_dup
  ) {

    when(s2_flush)(s2_valid := false.B)
      .elsewhen(s1_fire)(s2_valid := !s1_flush)
      .elsewhen(s2_fire)(s2_valid := false.B)
  }

  predictors.io.s2_fire     := s2_fire_dup
  predictors.io.s2_redirect := s2_redirect_dup

  s3_fire_dup := s3_valid_dup

  for (
    ((((s2_fire, s3_flush), s3_fire), s3_valid), s2_flush) <-
      s2_fire_dup zip s3_flush_dup zip s3_fire_dup zip s3_valid_dup zip s2_flush_dup
  ) {

    when(s3_flush)(s3_valid := false.B)
      .elsewhen(s2_fire)(s3_valid := !s2_flush)
      .elsewhen(s3_fire)(s3_valid := false.B)
  }

  predictors.io.s3_fire     := s3_fire_dup
  predictors.io.s3_redirect := s3_redirect_dup

  io.bpu_to_ftq.resp.valid :=
    s1_valid_dup(2) && s2_components_ready_dup(2) && s2_ready_dup(2) ||
      s2_fire_dup(2) && s2_redirect_dup(2) ||
      s3_fire_dup(2) && s3_redirect_dup(2)
  io.bpu_to_ftq.resp.bits                              := predictors.io.out
  io.bpu_to_ftq.resp.bits.last_stage_spec_info.histPtr := s3_ghist_ptr_dup(2)

  val full_pred_diff        = WireInit(false.B)
  val full_pred_diff_stage  = WireInit(0.U)
  val full_pred_diff_offset = WireInit(0.U)
  for (i <- 0 until numDup - 1) {
    when(io.bpu_to_ftq.resp.valid &&
      ((io.bpu_to_ftq.resp.bits.s1.full_pred(i).asTypeOf(UInt()) =/= io.bpu_to_ftq.resp.bits.s1.full_pred(
        i + 1
      ).asTypeOf(UInt()) && io.bpu_to_ftq.resp.bits.s1.full_pred(i).hit) ||
        (io.bpu_to_ftq.resp.bits.s2.full_pred(i).asTypeOf(UInt()) =/= io.bpu_to_ftq.resp.bits.s2.full_pred(
          i + 1
        ).asTypeOf(UInt()) && io.bpu_to_ftq.resp.bits.s2.full_pred(i).hit) ||
        (io.bpu_to_ftq.resp.bits.s3.full_pred(i).asTypeOf(UInt()) =/= io.bpu_to_ftq.resp.bits.s3.full_pred(
          i + 1
        ).asTypeOf(UInt()) && io.bpu_to_ftq.resp.bits.s3.full_pred(i).hit))) {
      full_pred_diff        := true.B
      full_pred_diff_offset := i.U
      when(io.bpu_to_ftq.resp.bits.s1.full_pred(i).asTypeOf(UInt()) =/= io.bpu_to_ftq.resp.bits.s1.full_pred(
        i + 1
      ).asTypeOf(UInt())) {
        full_pred_diff_stage := 1.U
      }.elsewhen(io.bpu_to_ftq.resp.bits.s2.full_pred(i).asTypeOf(UInt()) =/= io.bpu_to_ftq.resp.bits.s2.full_pred(
        i + 1
      ).asTypeOf(UInt())) {
        full_pred_diff_stage := 2.U
      }.otherwise {
        full_pred_diff_stage := 3.U
      }
    }
  }
  XSError(full_pred_diff, "Full prediction difference detected!")

  // s0_stall should be exclusive with any other PC source
  s0_stall_dup.zip(s1_valid_dup).zip(s2_redirect_dup).zip(s3_redirect_dup).zip(do_redirect_dup).foreach {
    case ((((s0_stall, s1_valid), s2_redirect), s3_redirect), do_redirect) => {
      s0_stall := !(s1_valid || s2_redirect || s3_redirect || do_redirect.valid)
    }
  }
  // Power-on reset
  val powerOnResetState = RegInit(true.B)
  when(s0_fire_dup(0)) {
    // When the BPU pipeline fires for the first time, power-on reset is considered done
    powerOnResetState := false.B
  }
  XSError(
    !powerOnResetState && s0_stall_dup(0) && s0_pc_dup(0) =/= s0_pc_reg_dup(0),
    "s0_stall but s0_pc is different from s0_pc_reg"
  )

  npcGen_dup.zip(s0_pc_reg_dup).map { case (gen, reg) =>
    gen.register(true.B, reg, Some("stallPC"), 0)
  }
  foldedGhGen_dup.zip(s0_folded_gh_reg_dup).map { case (gen, reg) =>
    gen.register(true.B, reg, Some("stallFGH"), 0)
  }
  ghistPtrGen_dup.zip(s0_ghist_ptr_reg_dup).map { case (gen, reg) =>
    gen.register(true.B, reg, Some("stallGHPtr"), 0)
  }
  lastBrNumOHGen_dup.zip(s0_last_br_num_oh_reg_dup).map { case (gen, reg) =>
    gen.register(true.B, reg, Some("stallBrNumOH"), 0)
  }
  aheadFhObGen_dup.zip(s0_ahead_fh_oldest_bits_reg_dup).map { case (gen, reg) =>
    gen.register(true.B, reg, Some("stallAFHOB"), 0)
  }

  // assign pred cycle for profiling
  io.bpu_to_ftq.resp.bits.s1.full_pred.map(_.predCycle.map(_ := GTimer()))
  io.bpu_to_ftq.resp.bits.s2.full_pred.map(_.predCycle.map(_ := GTimer()))
  io.bpu_to_ftq.resp.bits.s3.full_pred.map(_.predCycle.map(_ := GTimer()))

  // History management
  // s1
  val s1_possible_predicted_ghist_ptrs_dup = s1_ghist_ptr_dup.map(ptr => (0 to numBr).map(ptr - _.U))
  val s1_predicted_ghist_ptr_dup = s1_possible_predicted_ghist_ptrs_dup.zip(resp.s1.lastBrPosOH).map { case (ptr, oh) =>
    Mux1H(oh, ptr)
  }
  val s1_possible_predicted_fhs_dup =
    for (
      ((((fgh, afh), br_num_oh), t), br_pos_oh) <-
        s1_folded_gh_dup zip s1_ahead_fh_oldest_bits_dup zip s1_last_br_num_oh_dup zip resp.s1.brTaken zip resp.s1.lastBrPosOH
    )
      yield (0 to numBr).map(i =>
        fgh.update(afh, br_num_oh, i, t & br_pos_oh(i))
      )
  val s1_predicted_fh_dup = resp.s1.lastBrPosOH.zip(s1_possible_predicted_fhs_dup).map { case (oh, fh) =>
    Mux1H(oh, fh)
  }

  val s1_ahead_fh_ob_src_dup = dup_wire(new AllAheadFoldedHistoryOldestBits(foldedGHistInfos))
  s1_ahead_fh_ob_src_dup.zip(s1_ghist_ptr_dup).map { case (src, ptr) => src.read(ghv, ptr) }

  if (EnableGHistDiff) {
    val s1_predicted_ghist = WireInit(getHist(s1_predicted_ghist_ptr_dup(0)).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when(resp.s1.shouldShiftVec(0)(i)) {
        s1_predicted_ghist(i) := resp.s1.brTaken(0) && (i == 0).B
      }
    }
    when(s1_valid_dup(0)) {
      s0_ghist := s1_predicted_ghist.asUInt
    }
  }

  val s1_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b =>
      s1_ghist_ptr_dup(0).value === (CGHPtr(false.B, n.U) + b.U).value &&
        resp.s1.shouldShiftVec(0)(b) && s1_valid_dup(0)
    )
  )
  val s1_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b =>
        (
          s1_ghist_ptr_dup(0).value === (CGHPtr(false.B, n.U) + b.U).value && resp.s1.shouldShiftVec(0)(b),
          resp.s1.brTaken(0) && resp.s1.lastBrPosOH(0)(b + 1)
        )
      )
    )
  )

  for (((npcGen, s1_valid), s1_target) <- npcGen_dup zip s1_valid_dup zip resp.s1.getTarget)
    npcGen.register(s1_valid, s1_target, Some("s1_target"), 4)
  for (((foldedGhGen, s1_valid), s1_predicted_fh) <- foldedGhGen_dup zip s1_valid_dup zip s1_predicted_fh_dup)
    foldedGhGen.register(s1_valid, s1_predicted_fh, Some("s1_FGH"), 4)
  for (
    ((ghistPtrGen, s1_valid), s1_predicted_ghist_ptr) <- ghistPtrGen_dup zip s1_valid_dup zip s1_predicted_ghist_ptr_dup
  )
    ghistPtrGen.register(s1_valid, s1_predicted_ghist_ptr, Some("s1_GHPtr"), 4)
  for (
    ((lastBrNumOHGen, s1_valid), s1_brPosOH) <-
      lastBrNumOHGen_dup zip s1_valid_dup zip resp.s1.lastBrPosOH.map(_.asUInt)
  )
    lastBrNumOHGen.register(s1_valid, s1_brPosOH, Some("s1_BrNumOH"), 4)
  for (((aheadFhObGen, s1_valid), s1_ahead_fh_ob_src) <- aheadFhObGen_dup zip s1_valid_dup zip s1_ahead_fh_ob_src_dup)
    aheadFhObGen.register(s1_valid, s1_ahead_fh_ob_src, Some("s1_AFHOB"), 4)
  ghvBitWriteGens.zip(s1_ghv_wens).zipWithIndex.map { case ((b, w), i) =>
    b.register(w.reduce(_ || _), s1_ghv_wdatas(i), Some(s"s1_new_bit_$i"), 4)
  }

  class PreviousPredInfo extends Bundle {
    val hit         = Vec(numDup, Bool())
    val target      = Vec(numDup, UInt(VAddrBits.W))
    val lastBrPosOH = Vec(numDup, Vec(numBr + 1, Bool()))
    val taken       = Vec(numDup, Bool())
    val takenMask   = Vec(numDup, Vec(numBr, Bool()))
    val cfiIndex    = Vec(numDup, UInt(log2Ceil(PredictWidth).W))
  }

  def preds_needs_redirect_vec_dup(x: PreviousPredInfo, y: BranchPredictionBundle) = {
    // Timing optimization:
    // we first compare all targets against the previous stage's targets,
    // then select among the differences by taken & hit.
    // Targets are usually generated earlier than taken signals, so comparing
    // targets before selecting helps timing.
    val targetDiffVec: IndexedSeq[Vec[Bool]] =
      x.target.zip(y.getAllTargets).map {
        case (xTarget, yAllTarget) => VecInit(yAllTarget.map(_ =/= xTarget))
      } // [numDup][all Target comparison]
    val targetDiff: IndexedSeq[Bool] =
      targetDiffVec.zip(x.hit).zip(x.takenMask).map {
        case ((diff, hit), takenMask) => selectByTaken(takenMask, hit, diff)
      } // [numDup]

    val lastBrPosOHDiff: IndexedSeq[Bool] = x.lastBrPosOH.zip(y.lastBrPosOH).map { case (oh1, oh2) =>
      oh1.asUInt =/= oh2.asUInt
    }
    val takenDiff: IndexedSeq[Bool] = x.taken.zip(y.taken).map { case (t1, t2) => t1 =/= t2 }
    val takenOffsetDiff: IndexedSeq[Bool] = x.cfiIndex.zip(y.cfiIndex).zip(x.taken).zip(y.taken).map {
      case (((i1, i2), xt), yt) => xt && yt && i1 =/= i2.bits
    }
    VecInit(
      for (
        (((tgtd, lbpohd), tkd), tod) <-
          targetDiff zip lastBrPosOHDiff zip takenDiff zip takenOffsetDiff
      )
        yield VecInit(tgtd, lbpohd, tkd, tod)
      // x.shouldShiftVec.asUInt =/= y.shouldShiftVec.asUInt,
      // x.brTaken =/= y.brTaken
    )
  }
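  // Editor's note (illustrative sketch, not in the original source): per
  // duplicate, the function above computes
  //   selectByTaken(takenMask, hit, VecInit(allTargets.map(_ =/= prevTarget)))
  // instead of the naive select-then-compare
  //   selectByTaken(takenMask, hit, allTargets) =/= prevTarget
  // so the wide target comparators start as soon as targets settle and only a
  // one-bit select remains on the late taken/hit path.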

  // s2
  val s2_possible_predicted_ghist_ptrs_dup = s2_ghist_ptr_dup.map(ptr => (0 to numBr).map(ptr - _.U))
  val s2_predicted_ghist_ptr_dup = s2_possible_predicted_ghist_ptrs_dup.zip(resp.s2.lastBrPosOH).map { case (ptr, oh) =>
    Mux1H(oh, ptr)
  }

  val s2_possible_predicted_fhs_dup =
    for (
      (((fgh, afh), br_num_oh), full_pred) <-
        s2_folded_gh_dup zip s2_ahead_fh_oldest_bits_dup zip s2_last_br_num_oh_dup zip resp.s2.full_pred
    )
      yield (0 to numBr).map(i =>
        fgh.update(afh, br_num_oh, i, if (i > 0) full_pred.br_taken_mask(i - 1) else false.B)
      )
  val s2_predicted_fh_dup = resp.s2.lastBrPosOH.zip(s2_possible_predicted_fhs_dup).map { case (oh, fh) =>
    Mux1H(oh, fh)
  }

  val s2_ahead_fh_ob_src_dup = dup_wire(new AllAheadFoldedHistoryOldestBits(foldedGHistInfos))
  s2_ahead_fh_ob_src_dup.zip(s2_ghist_ptr_dup).map { case (src, ptr) => src.read(ghv, ptr) }

  if (EnableGHistDiff) {
    val s2_predicted_ghist = WireInit(getHist(s2_predicted_ghist_ptr_dup(0)).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when(resp.s2.shouldShiftVec(0)(i)) {
        s2_predicted_ghist(i) := resp.s2.brTaken(0) && (i == 0).B
      }
    }
    when(s2_redirect_dup(0)) {
      s0_ghist := s2_predicted_ghist.asUInt
    }
  }

  val s2_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b =>
      s2_ghist_ptr_dup(0).value === (CGHPtr(false.B, n.U) + b.U).value &&
        resp.s2.shouldShiftVec(0)(b) && s2_redirect_dup(0)
    )
  )
  val s2_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b =>
        (
          s2_ghist_ptr_dup(0).value === (CGHPtr(false.B, n.U) + b.U).value && resp.s2.shouldShiftVec(0)(b),
          resp.s2.full_pred(0).real_br_taken_mask()(b)
        )
      )
    )
  )

  val s1_pred_info = Wire(new PreviousPredInfo)
  s1_pred_info.hit         := resp.s1.full_pred.map(_.hit)
  s1_pred_info.target      := resp.s1.getTarget
  s1_pred_info.lastBrPosOH := resp.s1.lastBrPosOH
  s1_pred_info.taken       := resp.s1.taken
  s1_pred_info.takenMask   := resp.s1.full_pred.map(_.taken_mask_on_slot)
  s1_pred_info.cfiIndex    := resp.s1.cfiIndex.map { case x => x.bits }

  val previous_s1_pred_info = RegEnable(s1_pred_info, 0.U.asTypeOf(new PreviousPredInfo), s1_fire_dup(0))

  val s2_redirect_s1_last_pred_vec_dup = preds_needs_redirect_vec_dup(previous_s1_pred_info, resp.s2)

  for (
    ((s2_redirect, s2_fire), s2_redirect_s1_last_pred_vec) <-
      s2_redirect_dup zip s2_fire_dup zip s2_redirect_s1_last_pred_vec_dup
  )
    s2_redirect := s2_fire && s2_redirect_s1_last_pred_vec.reduce(_ || _)

  for (((npcGen, s2_redirect), s2_target) <- npcGen_dup zip s2_redirect_dup zip resp.s2.getTarget)
    npcGen.register(s2_redirect, s2_target, Some("s2_target"), 5)
  for (((foldedGhGen, s2_redirect), s2_predicted_fh) <- foldedGhGen_dup zip s2_redirect_dup zip s2_predicted_fh_dup)
    foldedGhGen.register(s2_redirect, s2_predicted_fh, Some("s2_FGH"), 5)
  for (
    ((ghistPtrGen, s2_redirect), s2_predicted_ghist_ptr) <-
      ghistPtrGen_dup zip s2_redirect_dup zip s2_predicted_ghist_ptr_dup
  )
    ghistPtrGen.register(s2_redirect, s2_predicted_ghist_ptr, Some("s2_GHPtr"), 5)
  for (
    ((lastBrNumOHGen, s2_redirect), s2_brPosOH) <-
      lastBrNumOHGen_dup zip s2_redirect_dup zip resp.s2.lastBrPosOH.map(_.asUInt)
  )
    lastBrNumOHGen.register(s2_redirect, s2_brPosOH, Some("s2_BrNumOH"), 5)
  for (
    ((aheadFhObGen, s2_redirect), s2_ahead_fh_ob_src) <- aheadFhObGen_dup zip s2_redirect_dup zip s2_ahead_fh_ob_src_dup
  )
    aheadFhObGen.register(s2_redirect, s2_ahead_fh_ob_src, Some("s2_AFHOB"), 5)
  ghvBitWriteGens.zip(s2_ghv_wens).zipWithIndex.map { case ((b, w), i) =>
    b.register(w.reduce(_ || _), s2_ghv_wdatas(i), Some(s"s2_new_bit_$i"), 5)
  }

  XSPerfAccumulate("s2_redirect_because_target_diff", s2_fire_dup(0) && s2_redirect_s1_last_pred_vec_dup(0)(0))
  XSPerfAccumulate("s2_redirect_because_branch_num_diff", s2_fire_dup(0) && s2_redirect_s1_last_pred_vec_dup(0)(1))
  XSPerfAccumulate("s2_redirect_because_direction_diff", s2_fire_dup(0) && s2_redirect_s1_last_pred_vec_dup(0)(2))
  XSPerfAccumulate("s2_redirect_because_cfi_idx_diff", s2_fire_dup(0) && s2_redirect_s1_last_pred_vec_dup(0)(3))
  // XSPerfAccumulate("s2_redirect_because_shouldShiftVec_diff", s2_fire && s2_redirect_s1_last_pred_vec(4))
  // XSPerfAccumulate("s2_redirect_because_brTaken_diff", s2_fire && s2_redirect_s1_last_pred_vec(5))
  XSPerfAccumulate("s2_redirect_because_fallThroughError", s2_fire_dup(0) && resp.s2.fallThruError(0))

  XSPerfAccumulate("s2_redirect_when_taken", s2_redirect_dup(0) && resp.s2.taken(0) && resp.s2.full_pred(0).hit)
  XSPerfAccumulate("s2_redirect_when_not_taken", s2_redirect_dup(0) && !resp.s2.taken(0) && resp.s2.full_pred(0).hit)
  XSPerfAccumulate("s2_redirect_when_not_hit", s2_redirect_dup(0) && !resp.s2.full_pred(0).hit)

  // s3
  val s3_possible_predicted_ghist_ptrs_dup = s3_ghist_ptr_dup.map(ptr => (0 to numBr).map(ptr - _.U))
  val s3_predicted_ghist_ptr_dup = s3_possible_predicted_ghist_ptrs_dup.zip(resp.s3.lastBrPosOH).map { case (ptr, oh) =>
    Mux1H(oh, ptr)
  }

  val s3_possible_predicted_fhs_dup =
    for (
      (((fgh, afh), br_num_oh), full_pred) <-
        s3_folded_gh_dup zip s3_ahead_fh_oldest_bits_dup zip s3_last_br_num_oh_dup zip resp.s3.full_pred
    )
      yield (0 to numBr).map(i =>
        fgh.update(afh, br_num_oh, i, if (i > 0) full_pred.br_taken_mask(i - 1) else false.B)
      )
  val s3_predicted_fh_dup = resp.s3.lastBrPosOH.zip(s3_possible_predicted_fhs_dup).map { case (oh, fh) =>
    Mux1H(oh, fh)
  }

  val s3_ahead_fh_ob_src_dup = dup_wire(new AllAheadFoldedHistoryOldestBits(foldedGHistInfos))
  s3_ahead_fh_ob_src_dup.zip(s3_ghist_ptr_dup).map { case (src, ptr) => src.read(ghv, ptr) }

  if (EnableGHistDiff) {
    val s3_predicted_ghist = WireInit(getHist(s3_predicted_ghist_ptr_dup(0)).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when(resp.s3.shouldShiftVec(0)(i)) {
        s3_predicted_ghist(i) := resp.s3.brTaken(0) && (i == 0).B
      }
    }
    when(s3_redirect_dup(0)) {
      s0_ghist := s3_predicted_ghist.asUInt
    }
  }

  val s3_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b =>
      s3_ghist_ptr_dup(0).value === (CGHPtr(false.B, n.U) + b.U).value && resp.s3.shouldShiftVec(0)(
        b
      ) && s3_redirect_dup(0)
    )
  )
  val s3_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b =>
        (
          s3_ghist_ptr_dup(0).value === (CGHPtr(false.B, n.U) + b.U).value && resp.s3.shouldShiftVec(0)(b),
          resp.s3.full_pred(0).real_br_taken_mask()(b)
        )
      )
    )
  )

  val previous_s2_pred = RegEnable(resp.s2, 0.U.asTypeOf(resp.s2), s2_fire_dup(0))

  val s3_redirect_on_br_taken_dup = resp.s3.full_pred.zip(previous_s2_pred.full_pred).map { case (fp1, fp2) =>
    fp1.real_br_taken_mask().asUInt =/= fp2.real_br_taken_mask().asUInt
  }
  val s3_both_first_taken_dup = resp.s3.full_pred.zip(previous_s2_pred.full_pred).map { case (fp1, fp2) =>
    fp1.real_br_taken_mask()(0) && fp2.real_br_taken_mask()(0)
  }
  val s3_redirect_on_target_dup = resp.s3.getTarget.zip(previous_s2_pred.getTarget).map { case (t1, t2) => t1 =/= t2 }
  val s3_redirect_on_jalr_target_dup = resp.s3.full_pred.zip(previous_s2_pred.full_pred).map { case (fp1, fp2) =>
    fp1.hit_taken_on_jalr && fp1.jalr_target =/= fp2.jalr_target
  }
  val s3_redirect_on_fall_thru_error_dup = resp.s3.fallThruError
  val s3_redirect_on_ftb_multi_hit_dup   = resp.s3.ftbMultiHit

  for (
    (
      (
        ((((s3_redirect, s3_fire), s3_redirect_on_br_taken), s3_redirect_on_target), s3_redirect_on_fall_thru_error),
        s3_redirect_on_ftb_multi_hit
      ),
      s3_both_first_taken
    ) <-
      s3_redirect_dup zip s3_fire_dup zip s3_redirect_on_br_taken_dup zip s3_redirect_on_target_dup zip s3_redirect_on_fall_thru_error_dup zip s3_redirect_on_ftb_multi_hit_dup zip s3_both_first_taken_dup
  ) {

    s3_redirect := s3_fire && (
      (s3_redirect_on_br_taken && !s3_both_first_taken) || s3_redirect_on_target || s3_redirect_on_fall_thru_error || s3_redirect_on_ftb_multi_hit
    )
  }

  XSPerfAccumulate(f"s3_redirect_on_br_taken", s3_fire_dup(0) && s3_redirect_on_br_taken_dup(0))
  XSPerfAccumulate(f"s3_redirect_on_jalr_target", s3_fire_dup(0) && s3_redirect_on_jalr_target_dup(0))
  XSPerfAccumulate(
    f"s3_redirect_on_others",
    s3_redirect_dup(0) && !(s3_redirect_on_br_taken_dup(0) || s3_redirect_on_jalr_target_dup(0))
  )

  for (((npcGen, s3_redirect), s3_target) <- npcGen_dup zip s3_redirect_dup zip resp.s3.getTarget)
    npcGen.register(s3_redirect, s3_target, Some("s3_target"), 3)
  for (((foldedGhGen, s3_redirect), s3_predicted_fh) <- foldedGhGen_dup zip s3_redirect_dup zip s3_predicted_fh_dup)
    foldedGhGen.register(s3_redirect, s3_predicted_fh, Some("s3_FGH"), 3)
  for (
    ((ghistPtrGen, s3_redirect), s3_predicted_ghist_ptr) <-
      ghistPtrGen_dup zip s3_redirect_dup zip s3_predicted_ghist_ptr_dup
  )
    ghistPtrGen.register(s3_redirect, s3_predicted_ghist_ptr, Some("s3_GHPtr"), 3)
  for (
    ((lastBrNumOHGen, s3_redirect), s3_brPosOH) <-
      lastBrNumOHGen_dup zip s3_redirect_dup zip resp.s3.lastBrPosOH.map(_.asUInt)
  )
    lastBrNumOHGen.register(s3_redirect, s3_brPosOH, Some("s3_BrNumOH"), 3)
  for (
    ((aheadFhObGen, s3_redirect), s3_ahead_fh_ob_src) <- aheadFhObGen_dup zip s3_redirect_dup zip s3_ahead_fh_ob_src_dup
  )
    aheadFhObGen.register(s3_redirect, s3_ahead_fh_ob_src, Some("s3_AFHOB"), 3)
  ghvBitWriteGens.zip(s3_ghv_wens).zipWithIndex.map { case ((b, w), i) =>
    b.register(w.reduce(_ || _), s3_ghv_wdatas(i), Some(s"s3_new_bit_$i"), 3)
  }

  // Send signals to tell FTQ about stage overrides
  val s2_ftq_idx = RegEnable(io.ftq_to_bpu.enq_ptr, s1_fire_dup(0))
  val s3_ftq_idx = RegEnable(s2_ftq_idx, s2_fire_dup(0))

  for (((to_ftq_s1_valid, s1_fire), s1_flush) <- io.bpu_to_ftq.resp.bits.s1.valid zip s1_fire_dup zip s1_flush_dup) {
    to_ftq_s1_valid := s1_fire && !s1_flush
  }
  io.bpu_to_ftq.resp.bits.s1.hasRedirect.map(_ := false.B)
  io.bpu_to_ftq.resp.bits.s1.ftq_idx := DontCare
  for (((to_ftq_s2_valid, s2_fire), s2_flush) <- io.bpu_to_ftq.resp.bits.s2.valid zip s2_fire_dup zip s2_flush_dup) {
    to_ftq_s2_valid := s2_fire && !s2_flush
  }
  io.bpu_to_ftq.resp.bits.s2.hasRedirect.zip(s2_redirect_dup).map { case (hr, r) => hr := r }
  io.bpu_to_ftq.resp.bits.s2.ftq_idx := s2_ftq_idx
  for (((to_ftq_s3_valid, s3_fire), s3_flush) <- io.bpu_to_ftq.resp.bits.s3.valid zip s3_fire_dup zip s3_flush_dup) {
    to_ftq_s3_valid := s3_fire && !s3_flush
  }
  io.bpu_to_ftq.resp.bits.s3.hasRedirect.zip(s3_redirect_dup).map { case (hr, r) => hr := r }
  io.bpu_to_ftq.resp.bits.s3.ftq_idx := s3_ftq_idx

  predictors.io.update.valid := RegNext(io.ftq_to_bpu.update.valid, init = false.B)
  predictors.io.update.bits  := RegEnable(io.ftq_to_bpu.update.bits, io.ftq_to_bpu.update.valid)
  predictors.io.update.bits.ghist := RegEnable(
    getHist(io.ftq_to_bpu.update.bits.spec_info.histPtr),
    io.ftq_to_bpu.update.valid
  )

  val redirect_dup = do_redirect_dup.map(_.bits)
  predictors.io.redirect := do_redirect_dup(0)

  // Redirect logic
  val shift_dup       = redirect_dup.map(_.cfiUpdate.shift)
  val addIntoHist_dup = redirect_dup.map(_.cfiUpdate.addIntoHist)
  // TODO: remove these below
  val shouldShiftVec_dup = shift_dup.map(shift =>
    Mux(
      shift === 0.U,
      VecInit(0.U((1 << (log2Ceil(numBr) + 1)).W).asBools),
      VecInit(LowerMask(1.U << (shift - 1.U)).asBools)
    )
  )
  // TODO end
  val afhob_dup       = redirect_dup.map(_.cfiUpdate.afhob)
  val lastBrNumOH_dup = redirect_dup.map(_.cfiUpdate.lastBrNumOH)

  val isBr_dup  = redirect_dup.map(_.cfiUpdate.pd.isBr)
  val taken_dup = redirect_dup.map(_.cfiUpdate.taken)
  val real_br_taken_mask_dup =
    for (((shift, taken), addIntoHist) <- shift_dup zip taken_dup zip addIntoHist_dup)
      yield (0 until numBr).map(i => shift === (i + 1).U && taken && addIntoHist)

  val oldPtr_dup      = redirect_dup.map(_.cfiUpdate.histPtr)
  val updated_ptr_dup = oldPtr_dup.zip(shift_dup).map { case (oldPtr, shift) => oldPtr - shift }
  def computeFoldedHist(hist: UInt, compLen: Int)(histLen: Int): UInt =
    if (histLen > 0) {
      val nChunks     = (histLen + compLen - 1) / compLen
      val hist_chunks = (0 until nChunks) map { i => hist(min((i + 1) * compLen, histLen) - 1, i * compLen) }
      ParallelXOR(hist_chunks)
    } else 0.U
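  // Worked example (editor's sketch, not in the original source): with
  // histLen = 13 and compLen = 8 there are ceil(13 / 8) = 2 chunks, so
  //   computeFoldedHist(hist, 8)(13) = hist(7, 0) ^ hist(12, 8)
  // which matches the folding used for table indexing, letting the redirect
  // path recompute any folded history directly from the raw ghv bits.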

  val oldFh_dup = dup_seq(WireInit(0.U.asTypeOf(new AllFoldedHistories(foldedGHistInfos))))
  oldFh_dup.zip(oldPtr_dup).map { case (oldFh, oldPtr) =>
    foldedGHistInfos.foreach { case (histLen, compLen) =>
      oldFh.getHistWithInfo((histLen, compLen)).folded_hist := computeFoldedHist(getHist(oldPtr), compLen)(histLen)
    }
  }

  val updated_fh_dup =
    for (
      ((((oldFh, oldPtr), taken), addIntoHist), shift) <-
        oldFh_dup zip oldPtr_dup zip taken_dup zip addIntoHist_dup zip shift_dup
    )
      yield VecInit((0 to numBr).map(i => oldFh.update(ghv, oldPtr, i, taken && addIntoHist)))(shift)
  val thisBrNumOH_dup   = shift_dup.map(shift => UIntToOH(shift, numBr + 1))
  val thisAheadFhOb_dup = dup_wire(new AllAheadFoldedHistoryOldestBits(foldedGHistInfos))
  thisAheadFhOb_dup.zip(oldPtr_dup).map { case (afhob, oldPtr) => afhob.read(ghv, oldPtr) }
  val redirect_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b =>
      oldPtr_dup(0).value === (CGHPtr(false.B, n.U) + b.U).value && shouldShiftVec_dup(0)(b) && do_redirect_dup(0).valid
    )
  )
  val redirect_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b => oldPtr_dup(0).value === (CGHPtr(false.B, n.U) + b.U).value && shouldShiftVec_dup(0)(b)),
      real_br_taken_mask_dup(0)
    )
  )

  if (EnableGHistDiff) {
    val updated_ghist = WireInit(getHist(updated_ptr_dup(0)).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when(shift_dup(0) >= (i + 1).U) {
        updated_ghist(i) := taken_dup(0) && addIntoHist_dup(0) && (i == 0).B
      }
    }
    when(do_redirect_dup(0).valid) {
      s0_ghist := updated_ghist.asUInt
    }
  }

  // Commit time history checker
  if (EnableCommitGHistDiff) {
    val commitGHist    = RegInit(0.U.asTypeOf(Vec(HistoryLength, Bool())))
    val commitGHistPtr = RegInit(0.U.asTypeOf(new CGHPtr))
    def getCommitHist(ptr: CGHPtr): UInt =
      (Cat(commitGHist.asUInt, commitGHist.asUInt) >> (ptr.value + 1.U))(HistoryLength - 1, 0)

    val updateValid:         Bool      = io.ftq_to_bpu.update.valid
    val branchValidMask:     UInt      = io.ftq_to_bpu.update.bits.ftb_entry.brValids.asUInt
    val branchCommittedMask: Vec[Bool] = io.ftq_to_bpu.update.bits.br_committed
    val misPredictMask:      UInt      = io.ftq_to_bpu.update.bits.mispred_mask.asUInt
    val takenMask: UInt =
      io.ftq_to_bpu.update.bits.br_taken_mask.asUInt |
        io.ftq_to_bpu.update.bits.ftb_entry.strong_bias.asUInt // Always-taken branches are recorded in history
    val takenIdx:      UInt = (PriorityEncoder(takenMask) + 1.U((log2Ceil(numBr) + 1).W)).asUInt
    val misPredictIdx: UInt = (PriorityEncoder(misPredictMask) + 1.U((log2Ceil(numBr) + 1).W)).asUInt
    val shouldShiftMask: UInt = Mux(takenMask.orR, LowerMask(takenIdx).asUInt, ((1 << numBr) - 1).asUInt) &
      Mux(misPredictMask.orR, LowerMask(misPredictIdx).asUInt, ((1 << numBr) - 1).asUInt) &
      branchCommittedMask.asUInt
    val updateShift: UInt =
      Mux(updateValid && branchValidMask.orR, PopCount(branchValidMask & shouldShiftMask), 0.U)

    // Maintain the commitGHist
    for (i <- 0 until numBr) {
      when(updateShift >= (i + 1).U) {
        val ptr: CGHPtr = commitGHistPtr - i.asUInt
        commitGHist(ptr.value) := takenMask(i)
      }
    }
    when(updateValid) {
      commitGHistPtr := commitGHistPtr - updateShift
    }

    // Calculate the true folded history using parallel XOR
    // and compare it against the predict-time folded history (differential check)
    TageTableInfos.map {
      case (nRows, histLen, _) => {
        val nRowsPerBr      = nRows / numBr
        val predictGHistPtr = io.ftq_to_bpu.update.bits.spec_info.histPtr
        val commitTrueHist: UInt = computeFoldedHist(getCommitHist(commitGHistPtr), log2Ceil(nRowsPerBr))(histLen)
        val predictFHist:   UInt = computeFoldedHist(getHist(predictGHistPtr), log2Ceil(nRowsPerBr))(histLen)
        XSWarn(
          updateValid && predictFHist =/= commitTrueHist,
          p"predict time ghist: ${predictFHist} is different from commit time: ${commitTrueHist}\n"
        )
      }
    }
  }

  // val updatedGh = oldGh.update(shift, taken && addIntoHist)
  for ((npcGen, do_redirect) <- npcGen_dup zip do_redirect_dup)
    npcGen.register(do_redirect.valid, do_redirect.bits.cfiUpdate.target, Some("redirect_target"), 2)
  for (((foldedGhGen, do_redirect), updated_fh) <- foldedGhGen_dup zip do_redirect_dup zip updated_fh_dup)
    foldedGhGen.register(do_redirect.valid, updated_fh, Some("redirect_FGHT"), 2)
  for (((ghistPtrGen, do_redirect), updated_ptr) <- ghistPtrGen_dup zip do_redirect_dup zip updated_ptr_dup)
    ghistPtrGen.register(do_redirect.valid, updated_ptr, Some("redirect_GHPtr"), 2)
  for (((lastBrNumOHGen, do_redirect), thisBrNumOH) <- lastBrNumOHGen_dup zip do_redirect_dup zip thisBrNumOH_dup)
    lastBrNumOHGen.register(do_redirect.valid, thisBrNumOH, Some("redirect_BrNumOH"), 2)
  for (((aheadFhObGen, do_redirect), thisAheadFhOb) <- aheadFhObGen_dup zip do_redirect_dup zip thisAheadFhOb_dup)
    aheadFhObGen.register(do_redirect.valid, thisAheadFhOb, Some("redirect_AFHOB"), 2)
  ghvBitWriteGens.zip(redirect_ghv_wens).zipWithIndex.map { case ((b, w), i) =>
    b.register(w.reduce(_ || _), redirect_ghv_wdatas(i), Some(s"redirect_new_bit_$i"), 2)
  }
  // no need to assign s0_last_pred

  // val need_reset = RegNext(reset.asBool) && !reset.asBool

  // Reset
  // npcGen.register(need_reset, resetVector.U, Some("reset_pc"), 1)
  // foldedGhGen.register(need_reset, 0.U.asTypeOf(s0_folded_gh), Some("reset_FGH"), 1)
  // ghistPtrGen.register(need_reset, 0.U.asTypeOf(new CGHPtr), Some("reset_GHPtr"), 1)

  s0_pc_dup.zip(npcGen_dup).map { case (s0_pc, npcGen) => s0_pc := npcGen() }
  s0_folded_gh_dup.zip(foldedGhGen_dup).map { case (s0_folded_gh, foldedGhGen) => s0_folded_gh := foldedGhGen() }
  s0_ghist_ptr_dup.zip(ghistPtrGen_dup).map { case (s0_ghist_ptr, ghistPtrGen) => s0_ghist_ptr := ghistPtrGen() }
  s0_ahead_fh_oldest_bits_dup.zip(aheadFhObGen_dup).map { case (s0_ahead_fh_oldest_bits, aheadFhObGen) =>
    s0_ahead_fh_oldest_bits := aheadFhObGen()
  }
  s0_last_br_num_oh_dup.zip(lastBrNumOHGen_dup).map { case (s0_last_br_num_oh, lastBrNumOHGen) =>
    s0_last_br_num_oh := lastBrNumOHGen()
  }
  (ghv_write_datas zip ghvBitWriteGens).map { case (wd, d) => wd := d() }
  for (i <- 0 until HistoryLength) {
    ghv_wens(i) := Seq(s1_ghv_wens, s2_ghv_wens, s3_ghv_wens, redirect_ghv_wens).map(_(i).reduce(_ || _)).reduce(_ || _)
    when(ghv_wens(i)) {
      ghv(i) := ghv_write_datas(i)
    }
  }

  // TODO: signals for memVio and other Redirects
  controlRedirectBubble := do_redirect_dup(0).valid && do_redirect_dup(0).bits.ControlRedirectBubble
  ControlBTBMissBubble  := do_redirect_dup(0).bits.ControlBTBMissBubble
  TAGEMissBubble        := do_redirect_dup(0).bits.TAGEMissBubble
  SCMissBubble          := do_redirect_dup(0).bits.SCMissBubble
  ITTAGEMissBubble      := do_redirect_dup(0).bits.ITTAGEMissBubble
  RASMissBubble         := do_redirect_dup(0).bits.RASMissBubble

  memVioRedirectBubble                 := do_redirect_dup(0).valid && do_redirect_dup(0).bits.MemVioRedirectBubble
  otherRedirectBubble                  := do_redirect_dup(0).valid && do_redirect_dup(0).bits.OtherRedirectBubble
  btbMissBubble                        := do_redirect_dup(0).valid && do_redirect_dup(0).bits.BTBMissBubble
  overrideBubble(0)                    := s2_redirect_dup(0)
  overrideBubble(1)                    := s3_redirect_dup(0)
  ftqUpdateBubble(0)                   := !s1_components_ready_dup(0)
  ftqUpdateBubble(1)                   := !s2_components_ready_dup(0)
  ftqUpdateBubble(2)                   := !s3_components_ready_dup(0)
  ftqFullStall                         := !io.bpu_to_ftq.resp.ready
  io.bpu_to_ftq.resp.bits.topdown_info := topdown_stages(numOfStage - 1)

  // topdown handling logic here
  when(controlRedirectBubble) {
    /*
    for (i <- 0 until numOfStage)
      topdown_stages(i).reasons(TopDownCounters.ControlRedirectBubble.id) := true.B
    io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.ControlRedirectBubble.id) := true.B
     */
    when(ControlBTBMissBubble) {
      for (i <- 0 until numOfStage)
        topdown_stages(i).reasons(TopDownCounters.BTBMissBubble.id)                  := true.B
      io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.BTBMissBubble.id) := true.B
    }.elsewhen(TAGEMissBubble) {
      for (i <- 0 until numOfStage)
        topdown_stages(i).reasons(TopDownCounters.TAGEMissBubble.id)                  := true.B
      io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.TAGEMissBubble.id) := true.B
    }.elsewhen(SCMissBubble) {
      for (i <- 0 until numOfStage)
        topdown_stages(i).reasons(TopDownCounters.SCMissBubble.id)                  := true.B
      io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.SCMissBubble.id) := true.B
    }.elsewhen(ITTAGEMissBubble) {
      for (i <- 0 until numOfStage)
        topdown_stages(i).reasons(TopDownCounters.ITTAGEMissBubble.id)                  := true.B
      io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.ITTAGEMissBubble.id) := true.B
    }.elsewhen(RASMissBubble) {
      for (i <- 0 until numOfStage)
        topdown_stages(i).reasons(TopDownCounters.RASMissBubble.id)                  := true.B
      io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.RASMissBubble.id) := true.B
    }
  }
  when(memVioRedirectBubble) {
    for (i <- 0 until numOfStage)
      topdown_stages(i).reasons(TopDownCounters.MemVioRedirectBubble.id)                  := true.B
    io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.MemVioRedirectBubble.id) := true.B
  }
  when(otherRedirectBubble) {
    for (i <- 0 until numOfStage)
      topdown_stages(i).reasons(TopDownCounters.OtherRedirectBubble.id)                  := true.B
    io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.OtherRedirectBubble.id) := true.B
  }
  when(btbMissBubble) {
    for (i <- 0 until numOfStage)
      topdown_stages(i).reasons(TopDownCounters.BTBMissBubble.id)                  := true.B
    io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.BTBMissBubble.id) := true.B
  }

  for (i <- 0 until numOfStage) {
    if (i < numOfStage - overrideStage) {
      when(overrideBubble(i)) {
        for (j <- 0 to i)
          topdown_stages(j).reasons(TopDownCounters.OverrideBubble.id) := true.B
      }
    }
    if (i < numOfStage - ftqUpdateStage) {
      when(ftqUpdateBubble(i)) {
        topdown_stages(i).reasons(TopDownCounters.FtqUpdateBubble.id) := true.B
      }
    }
  }
  when(ftqFullStall) {
    topdown_stages(0).reasons(TopDownCounters.FtqFullStall.id) := true.B
  }

  XSError(
    isBefore(redirect_dup(0).cfiUpdate.histPtr, s3_ghist_ptr_dup(0)) && do_redirect_dup(0).valid,
    p"s3_ghist_ptr ${s3_ghist_ptr_dup(0)} exceeds redirect histPtr ${redirect_dup(0).cfiUpdate.histPtr}\n"
  )
  XSError(
    isBefore(redirect_dup(0).cfiUpdate.histPtr, s2_ghist_ptr_dup(0)) && do_redirect_dup(0).valid,
    p"s2_ghist_ptr ${s2_ghist_ptr_dup(0)} exceeds redirect histPtr ${redirect_dup(0).cfiUpdate.histPtr}\n"
  )
  XSError(
    isBefore(redirect_dup(0).cfiUpdate.histPtr, s1_ghist_ptr_dup(0)) && do_redirect_dup(0).valid,
    p"s1_ghist_ptr ${s1_ghist_ptr_dup(0)} exceeds redirect histPtr ${redirect_dup(0).cfiUpdate.histPtr}\n"
  )

  XSDebug(RegNext(reset.asBool) && !reset.asBool, "Resetting...\n")
  XSDebug(io.ftq_to_bpu.update.valid, p"Update from ftq\n")
  XSDebug(io.ftq_to_bpu.redirect.valid, p"Redirect from ftq\n")

  XSDebug("[BP0]                 fire=%d                      pc=%x\n", s0_fire_dup(0), s0_pc_dup(0))
  XSDebug(
    "[BP1] v=%d r=%d cr=%d fire=%d             flush=%d pc=%x\n",
    s1_valid_dup(0),
    s1_ready_dup(0),
    s1_components_ready_dup(0),
    s1_fire_dup(0),
    s1_flush_dup(0),
    s1_pc
  )
  XSDebug(
    "[BP2] v=%d r=%d cr=%d fire=%d redirect=%d flush=%d pc=%x\n",
    s2_valid_dup(0),
    s2_ready_dup(0),
    s2_components_ready_dup(0),
    s2_fire_dup(0),
    s2_redirect_dup(0),
    s2_flush_dup(0),
    s2_pc
  )
  XSDebug(
    "[BP3] v=%d r=%d cr=%d fire=%d redirect=%d flush=%d pc=%x\n",
    s3_valid_dup(0),
    s3_ready_dup(0),
    s3_components_ready_dup(0),
    s3_fire_dup(0),
    s3_redirect_dup(0),
    s3_flush_dup(0),
    s3_pc
  )
  XSDebug("[FTQ] ready=%d\n", io.bpu_to_ftq.resp.ready)
  XSDebug("resp.s1.target=%x\n", resp.s1.getTarget(0))
  XSDebug("resp.s2.target=%x\n", resp.s2.getTarget(0))
  // XSDebug("s0_ghist: %b\n", s0_ghist.predHist)
  // XSDebug("s1_ghist: %b\n", s1_ghist.predHist)
  // XSDebug("s2_ghist: %b\n", s2_ghist.predHist)
  // XSDebug("s2_predicted_ghist: %b\n", s2_predicted_ghist.predHist)
  XSDebug(p"s0_ghist_ptr: ${s0_ghist_ptr_dup(0)}\n")
  XSDebug(p"s1_ghist_ptr: ${s1_ghist_ptr_dup(0)}\n")
  XSDebug(p"s2_ghist_ptr: ${s2_ghist_ptr_dup(0)}\n")
  XSDebug(p"s3_ghist_ptr: ${s3_ghist_ptr_dup(0)}\n")

  io.ftq_to_bpu.update.bits.display(io.ftq_to_bpu.update.valid)
  io.ftq_to_bpu.redirect.bits.display(io.ftq_to_bpu.redirect.valid)

  XSPerfAccumulate("s2_redirect", s2_redirect_dup(0))
  XSPerfAccumulate("s3_redirect", s3_redirect_dup(0))
  XSPerfAccumulate("s1_not_valid", !s1_valid_dup(0))

  val perfEvents = predictors.asInstanceOf[Composer].getPerfEvents
  generatePerfEvent()
}