// xref: /XiangShan/src/main/scala/xiangshan/frontend/BPU.scala (revision 1bc48dd1fa0af361fd194c65bad3b86349ec2903)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.frontend

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import utils._
import utility._

import scala.math.min
import xiangshan.backend.decode.ImmUnion

trait HasBPUConst extends HasXSParameter {
  val MaxMetaBaseLength = if (!env.FPGAPlatform) 512 else 256 // TODO: Reduce meta length
  val MaxMetaLength = if (HasHExtension) MaxMetaBaseLength + 4 else MaxMetaBaseLength
  val MaxBasicBlockSize = 32
  val LHistoryLength = 32
  // val numBr = 2
  val useBPD = true
  val useLHist = true
  val numBrSlot = numBr - 1
  val totalSlot = numBrSlot + 1

  val numDup = 4

  // Used to gate the higher parts of the PC
  val pcSegments = Seq(VAddrBits - 24, 12, 12)

  def BP_STAGES = (0 until 3).map(_.U(2.W))
  def BP_S1 = BP_STAGES(0)
  def BP_S2 = BP_STAGES(1)
  def BP_S3 = BP_STAGES(2)

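  // Many predictor-global signals (PC, folded history, pointers, fire/ready)
  // are duplicated numDup times so that physically distant consumers can each
  // use a nearby copy, cutting fanout and easing timing. The helpers below
  // build those duplicated Seqs/Vecs/Wires. An illustrative (hypothetical) use:
  //   val fire_dup = dup_wire(Bool())   // Wire(Vec(numDup, Bool()))
  //   fire_dup.foreach(_ := fire)       // drive every copy from one source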
  def dup_seq[T](src: T, num: Int = numDup) = Seq.tabulate(num)(n => src)
  def dup[T <: Data](src: T, num: Int = numDup) = VecInit(Seq.tabulate(num)(n => src))
  def dup_wire[T <: Data](src: T, num: Int = numDup) = Wire(Vec(num, src.cloneType))
  def dup_idx = Seq.tabulate(numDup)(n => n.toString())
  val numBpStages = BP_STAGES.length

  val debug = true
  // TODO: Replace log2Up by log2Ceil
}

trait HasBPUParameter extends HasXSParameter with HasBPUConst {
  val BPUDebug = true && !env.FPGAPlatform && env.EnablePerfDebug
  val EnableCFICommitLog = true
  val EnableCFIPredLog = true
  val EnableBPUTimeRecord = (EnableCFICommitLog || EnableCFIPredLog) && !env.FPGAPlatform
  val EnableCommit = false
}

class BPUCtrl(implicit p: Parameters) extends XSBundle {
  val ubtb_enable = Bool()
  val btb_enable  = Bool()
  val bim_enable  = Bool()
  val tage_enable = Bool()
  val sc_enable   = Bool()
  val ras_enable  = Bool()
  val loop_enable = Bool()
}

trait BPUUtils extends HasXSParameter {
  // circular shifting
  def circularShiftLeft(source: UInt, len: Int, shamt: UInt): UInt = {
    val res = Wire(UInt(len.W))
    val higher = source << shamt
    val lower = source >> (len.U - shamt)
    res := higher | lower
    res
  }

  def circularShiftRight(source: UInt, len: Int, shamt: UInt): UInt = {
    val res = Wire(UInt(len.W))
    val higher = source << (len.U - shamt)
    val lower = source >> shamt
    res := higher | lower
    res
  }
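
  // Worked example (not from the source): with len = 8, source = 0b0000_0101
  // and shamt = 2,
  //   circularShiftLeft  yields 0b0001_0100 (bits wrap from MSB back to LSB),
  //   circularShiftRight yields 0b0100_0001 (bits wrap from LSB back to MSB);
  // assigning through the len-bit `res` wire truncates the shifted-out bits.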

  // To be verified
  def satUpdate(old: UInt, len: Int, taken: Bool): UInt = {
    val oldSatTaken = old === ((1 << len) - 1).U
    val oldSatNotTaken = old === 0.U
    Mux(oldSatTaken && taken, ((1 << len) - 1).U,
      Mux(oldSatNotTaken && !taken, 0.U,
        Mux(taken, old + 1.U, old - 1.U)))
  }
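
  // Worked example (not from the source): with len = 2 this is the classic
  // 2-bit saturating counter 0 (strong NT) <-> 1 <-> 2 <-> 3 (strong T):
  // taken increments, not-taken decrements, and the endpoints hold, e.g.
  // satUpdate(3.U, 2, true.B) stays 3 and satUpdate(0.U, 2, false.B) stays 0.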

  def signedSatUpdate(old: SInt, len: Int, taken: Bool): SInt = {
    val oldSatTaken = old === ((1 << (len - 1)) - 1).S
    val oldSatNotTaken = old === (-(1 << (len - 1))).S
    Mux(oldSatTaken && taken, ((1 << (len - 1)) - 1).S,
      Mux(oldSatNotTaken && !taken, (-(1 << (len - 1))).S,
        Mux(taken, old + 1.S, old - 1.S)))
  }
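
  // The signed variant saturates at the two's-complement bounds instead:
  // for len = 3 that is +3 and -4. (Worked numbers, not from the source.)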

  def getFallThroughAddr(start: UInt, carry: Bool, pft: UInt) = {
    val higher = start.head(VAddrBits - log2Ceil(PredictWidth) - instOffsetBits)
    Cat(Mux(carry, higher + 1.U, higher), pft, 0.U(instOffsetBits.W))
  }
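
  // The fall-through address is rebuilt from the block start address: bits
  // above the fetch-block offset are reused (incremented when `carry` is
  // set), `pft` gives the partial fall-through position inside the block,
  // and the low instOffsetBits are zero. Illustratively, with carry = false
  // the result is simply Cat(higher bits of start, pft, zeros).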

  def foldTag(tag: UInt, l: Int): UInt = {
    val nChunks = (tag.getWidth + l - 1) / l
    val chunks = (0 until nChunks).map { i =>
      tag(min((i + 1) * l, tag.getWidth) - 1, i * l)
    }
    ParallelXOR(chunks)
  }
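
  // Illustrative example (not from the source): foldTag(tag, 4) on a 10-bit
  // tag XORs the chunks tag(3,0), tag(7,4) and tag(9,8) into one short
  // value, compressing a long tag while keeping entropy from every chunk.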
}

class BasePredictorInput (implicit p: Parameters) extends XSBundle with HasBPUConst {
  def nInputs = 1

  val s0_pc = Vec(numDup, UInt(VAddrBits.W))

  val folded_hist = Vec(numDup, new AllFoldedHistories(foldedGHistInfos))
  val s1_folded_hist = Vec(numDup, new AllFoldedHistories(foldedGHistInfos))
  val ghist = UInt(HistoryLength.W)

  val resp_in = Vec(nInputs, new BranchPredictionResp)

  // val final_preds = Vec(numBpStages, new)
  // val toFtq_fire = Bool()

  // val s0_all_ready = Bool()
}

class BasePredictorOutput (implicit p: Parameters) extends BranchPredictionResp {}

class BasePredictorIO (implicit p: Parameters) extends XSBundle with HasBPUConst {
  val reset_vector = Input(UInt(PAddrBits.W))
  val in  = Flipped(DecoupledIO(new BasePredictorInput)) // TODO: Remove DecoupledIO
  // val out = DecoupledIO(new BasePredictorOutput)
  val out = Output(new BasePredictorOutput)
  // val flush_out = Valid(UInt(VAddrBits.W))

  val fauftb_entry_in = Input(new FTBEntry)
  val fauftb_entry_hit_in = Input(Bool())
  val fauftb_entry_out = Output(new FTBEntry)
  val fauftb_entry_hit_out = Output(Bool())

  val ctrl = Input(new BPUCtrl)

  val s0_fire = Input(Vec(numDup, Bool()))
  val s1_fire = Input(Vec(numDup, Bool()))
  val s2_fire = Input(Vec(numDup, Bool()))
  val s3_fire = Input(Vec(numDup, Bool()))

  val s2_redirect = Input(Vec(numDup, Bool()))
  val s3_redirect = Input(Vec(numDup, Bool()))

  val s1_ready = Output(Bool())
  val s2_ready = Output(Bool())
  val s3_ready = Output(Bool())

  val update = Flipped(Valid(new BranchPredictionUpdate))
  val redirect = Flipped(Valid(new BranchPredictionRedirect))
  val redirectFromIFU = Input(Bool())
}

abstract class BasePredictor(implicit p: Parameters) extends XSModule
  with HasBPUConst with BPUUtils with HasPerfEvents {
  val meta_size = 0
  val spec_meta_size = 0
  val is_fast_pred = false
  val io = IO(new BasePredictorIO())

  io.out := io.in.bits.resp_in(0)

  io.fauftb_entry_out := io.fauftb_entry_in
  io.fauftb_entry_hit_out := io.fauftb_entry_hit_in

  io.out.last_stage_meta := 0.U

  io.in.ready := !io.redirect.valid

  io.s1_ready := true.B
  io.s2_ready := true.B
  io.s3_ready := true.B

  val s0_pc_dup   = WireInit(io.in.bits.s0_pc) // fetchIdx(io.f0_pc)
  val s1_pc_dup   = s0_pc_dup.zip(io.s0_fire).map {case (s0_pc, s0_fire) => RegEnable(s0_pc, s0_fire)}
  val s2_pc_dup   = s1_pc_dup.zip(io.s1_fire).map {case (s1_pc, s1_fire) => SegmentedAddrNext(s1_pc, pcSegments, s1_fire, Some("s2_pc"))}
  val s3_pc_dup   = s2_pc_dup.zip(io.s2_fire).map {case (s2_pc, s2_fire) => SegmentedAddrNext(s2_pc, s2_fire, Some("s3_pc"))}
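
  // s2/s3 PCs are held as segmented registers split by pcSegments, so the
  // rarely-toggling higher PC parts can be enable-gated separately from the
  // low bits; getAddr() below reassembles the full address.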

  when (RegNext(RegNext(reset.asBool) && !reset.asBool)) {
    s1_pc_dup.map{case s1_pc => s1_pc := io.reset_vector}
  }

  io.out.s1.pc := s1_pc_dup
  io.out.s2.pc := s2_pc_dup.map(_.getAddr())
  io.out.s3.pc := s3_pc_dup.map(_.getAddr())

  val perfEvents: Seq[(String, UInt)] = Seq()

  def getFoldedHistoryInfo: Option[Set[FoldedHistoryInfo]] = None
}

class FakePredictor(implicit p: Parameters) extends BasePredictor {
  io.in.ready                 := true.B
  io.out.last_stage_meta      := 0.U
  io.out := io.in.bits.resp_in(0)
}

class BpuToFtqIO(implicit p: Parameters) extends XSBundle {
  val resp = DecoupledIO(new BpuToFtqBundle())
}

class PredictorIO(implicit p: Parameters) extends XSBundle {
  val bpu_to_ftq = new BpuToFtqIO()
  val ftq_to_bpu = Flipped(new FtqToBpuIO)
  val ctrl = Input(new BPUCtrl)
  val reset_vector = Input(UInt(PAddrBits.W))
}

class Predictor(implicit p: Parameters) extends XSModule with HasBPUConst with HasPerfEvents with HasCircularQueuePtrHelper {
  val io = IO(new PredictorIO)

  val ctrl = DelayN(io.ctrl, 1)
  val predictors = Module(if (useBPD) new Composer else new FakePredictor)

  def numOfStage = 3
  require(numOfStage > 1, "BPU numOfStage must be greater than 1")
  val topdown_stages = RegInit(VecInit(Seq.fill(numOfStage)(0.U.asTypeOf(new FrontendTopDownBundle))))

  // The following can only happen on s1
  val controlRedirectBubble = Wire(Bool())
  val ControlBTBMissBubble = Wire(Bool())
  val TAGEMissBubble = Wire(Bool())
  val SCMissBubble = Wire(Bool())
  val ITTAGEMissBubble = Wire(Bool())
  val RASMissBubble = Wire(Bool())

  val memVioRedirectBubble = Wire(Bool())
  val otherRedirectBubble = Wire(Bool())
  val btbMissBubble = Wire(Bool())
  otherRedirectBubble := false.B
  memVioRedirectBubble := false.B

  // Override can happen between s1-s2 and s2-s3
  val overrideBubble = Wire(Vec(numOfStage - 1, Bool()))
  def overrideStage = 1
  // FTQ update block can happen on s1, s2 and s3
  val ftqUpdateBubble = Wire(Vec(numOfStage, Bool()))
  def ftqUpdateStage = 0
  // FTQ full stall only happens on s3 (the last stage)
  val ftqFullStall = Wire(Bool())

  // By default, no bubble event
  topdown_stages(0) := 0.U.asTypeOf(new FrontendTopDownBundle)
  // Event movement driven by clock only
  for (i <- 0 until numOfStage - 1) {
    topdown_stages(i + 1) := topdown_stages(i)
  }

  // ctrl signal
  predictors.io.ctrl := ctrl
  predictors.io.reset_vector := io.reset_vector

  val s0_stall_dup = dup_wire(Bool()) // s0 stalled for some reason, usually because the FTQ is full
  val s0_fire_dup, s1_fire_dup, s2_fire_dup, s3_fire_dup = dup_wire(Bool())
  val s1_valid_dup, s2_valid_dup, s3_valid_dup = dup_seq(RegInit(false.B))
  val s1_ready_dup, s2_ready_dup, s3_ready_dup = dup_wire(Bool())
  val s1_components_ready_dup, s2_components_ready_dup, s3_components_ready_dup = dup_wire(Bool())

  val s0_pc_dup = dup(WireInit(0.U.asTypeOf(UInt(VAddrBits.W))))
  val s0_pc_reg_dup = s0_pc_dup.zip(s0_stall_dup).map{ case (s0_pc, s0_stall) => RegEnable(s0_pc, !s0_stall) }
  when (RegNext(RegNext(reset.asBool) && !reset.asBool)) {
    s0_pc_reg_dup.map{case s0_pc => s0_pc := io.reset_vector}
  }
  val s1_pc = RegEnable(s0_pc_dup(0), s0_fire_dup(0))
  val s2_pc = RegEnable(s1_pc, s1_fire_dup(0))
  val s3_pc = RegEnable(s2_pc, s2_fire_dup(0))

  val s0_folded_gh_dup = dup_wire(new AllFoldedHistories(foldedGHistInfos))
  val s0_folded_gh_reg_dup = s0_folded_gh_dup.zip(s0_stall_dup).map{
    case (x, s0_stall) => RegEnable(x, 0.U.asTypeOf(s0_folded_gh_dup(0)), !s0_stall)
  }
  val s1_folded_gh_dup = RegEnable(s0_folded_gh_dup, 0.U.asTypeOf(s0_folded_gh_dup), s0_fire_dup(1))
  val s2_folded_gh_dup = RegEnable(s1_folded_gh_dup, 0.U.asTypeOf(s0_folded_gh_dup), s1_fire_dup(1))
  val s3_folded_gh_dup = RegEnable(s2_folded_gh_dup, 0.U.asTypeOf(s0_folded_gh_dup), s2_fire_dup(1))

  val s0_last_br_num_oh_dup = dup_wire(UInt((numBr + 1).W))
  val s0_last_br_num_oh_reg_dup = s0_last_br_num_oh_dup.zip(s0_stall_dup).map{
    case (x, s0_stall) => RegEnable(x, 0.U, !s0_stall)
  }
  val s1_last_br_num_oh_dup = RegEnable(s0_last_br_num_oh_dup, 0.U.asTypeOf(s0_last_br_num_oh_dup), s0_fire_dup(1))
  val s2_last_br_num_oh_dup = RegEnable(s1_last_br_num_oh_dup, 0.U.asTypeOf(s0_last_br_num_oh_dup), s1_fire_dup(1))
  val s3_last_br_num_oh_dup = RegEnable(s2_last_br_num_oh_dup, 0.U.asTypeOf(s0_last_br_num_oh_dup), s2_fire_dup(1))

  val s0_ahead_fh_oldest_bits_dup = dup_wire(new AllAheadFoldedHistoryOldestBits(foldedGHistInfos))
  val s0_ahead_fh_oldest_bits_reg_dup = s0_ahead_fh_oldest_bits_dup.zip(s0_stall_dup).map{
    case (x, s0_stall) => RegEnable(x, 0.U.asTypeOf(s0_ahead_fh_oldest_bits_dup(0)), !s0_stall)
  }
  val s1_ahead_fh_oldest_bits_dup = RegEnable(s0_ahead_fh_oldest_bits_dup, 0.U.asTypeOf(s0_ahead_fh_oldest_bits_dup), s0_fire_dup(1))
  val s2_ahead_fh_oldest_bits_dup = RegEnable(s1_ahead_fh_oldest_bits_dup, 0.U.asTypeOf(s0_ahead_fh_oldest_bits_dup), s1_fire_dup(1))
  val s3_ahead_fh_oldest_bits_dup = RegEnable(s2_ahead_fh_oldest_bits_dup, 0.U.asTypeOf(s0_ahead_fh_oldest_bits_dup), s2_fire_dup(1))

  val npcGen_dup         = Seq.tabulate(numDup)(n => new PhyPriorityMuxGenerator[UInt])
  val foldedGhGen_dup    = Seq.tabulate(numDup)(n => new PhyPriorityMuxGenerator[AllFoldedHistories])
  val ghistPtrGen_dup    = Seq.tabulate(numDup)(n => new PhyPriorityMuxGenerator[CGHPtr])
  val lastBrNumOHGen_dup = Seq.tabulate(numDup)(n => new PhyPriorityMuxGenerator[UInt])
  val aheadFhObGen_dup   = Seq.tabulate(numDup)(n => new PhyPriorityMuxGenerator[AllAheadFoldedHistoryOldestBits])

  val ghvBitWriteGens = Seq.tabulate(HistoryLength)(n => new PhyPriorityMuxGenerator[Bool])
  // val ghistGen = new PhyPriorityMuxGenerator[UInt]

  val ghv = RegInit(0.U.asTypeOf(Vec(HistoryLength, Bool())))
  val ghv_wire = WireInit(ghv)

  val s0_ghist = WireInit(0.U.asTypeOf(UInt(HistoryLength.W)))

  println(f"history buffer length ${HistoryLength}")
  val ghv_write_datas = Wire(Vec(HistoryLength, Bool()))
  val ghv_wens = Wire(Vec(HistoryLength, Bool()))

  val s0_ghist_ptr_dup = dup_wire(new CGHPtr)
  val s0_ghist_ptr_reg_dup = s0_ghist_ptr_dup.zip(s0_stall_dup).map{
    case (x, s0_stall) => RegEnable(x, 0.U.asTypeOf(new CGHPtr), !s0_stall)
  }
  val s1_ghist_ptr_dup = RegEnable(s0_ghist_ptr_dup, 0.U.asTypeOf(s0_ghist_ptr_dup), s0_fire_dup(1))
  val s2_ghist_ptr_dup = RegEnable(s1_ghist_ptr_dup, 0.U.asTypeOf(s0_ghist_ptr_dup), s1_fire_dup(1))
  val s3_ghist_ptr_dup = RegEnable(s2_ghist_ptr_dup, 0.U.asTypeOf(s0_ghist_ptr_dup), s2_fire_dup(1))

  def getHist(ptr: CGHPtr): UInt = (Cat(ghv_wire.asUInt, ghv_wire.asUInt) >> (ptr.value + 1.U))(HistoryLength - 1, 0)
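
  // getHist linearizes the circular history buffer: Cat duplicates ghv_wire,
  // the shift by (ptr.value + 1) rotates the window start down to bit 0, and
  // the (HistoryLength - 1, 0) slice keeps exactly one window. Illustratively,
  // with HistoryLength = 8 and ptr.value = 5, bit 0 of the result is buffer
  // bit 6, bit 1 is buffer bit 7, bit 2 wraps around to buffer bit 0, etc.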
  s0_ghist := getHist(s0_ghist_ptr_dup(0))

  val resp = predictors.io.out

  val toFtq_fire = io.bpu_to_ftq.resp.valid && io.bpu_to_ftq.resp.ready

  val s1_flush_dup, s2_flush_dup, s3_flush_dup = dup_wire(Bool())
  val s2_redirect_dup, s3_redirect_dup = dup_wire(Bool())

  // predictors.io := DontCare
  predictors.io.in.valid := s0_fire_dup(0)
  predictors.io.in.bits.s0_pc := s0_pc_dup
  predictors.io.in.bits.ghist := s0_ghist
  predictors.io.in.bits.folded_hist := s0_folded_gh_dup
  predictors.io.in.bits.s1_folded_hist := s1_folded_gh_dup
  predictors.io.in.bits.resp_in(0) := (0.U).asTypeOf(new BranchPredictionResp)
  predictors.io.fauftb_entry_in := (0.U).asTypeOf(new FTBEntry)
  predictors.io.fauftb_entry_hit_in := false.B
  predictors.io.redirectFromIFU := RegNext(io.ftq_to_bpu.redirctFromIFU, init = false.B)
  // predictors.io.in.bits.resp_in(0).s1.pc := s0_pc
  // predictors.io.in.bits.toFtq_fire := toFtq_fire

  // predictors.io.out.ready := io.bpu_to_ftq.resp.ready

  val redirect_req = io.ftq_to_bpu.redirect
  val do_redirect_dup = dup_seq(RegNextWithEnable(redirect_req))

  // Pipeline logic
  s2_redirect_dup.map(_ := false.B)
  s3_redirect_dup.map(_ := false.B)

  s3_flush_dup.map(_ := redirect_req.valid) // flush when a redirect arrives
  for (((s2_flush, s3_flush), s3_redirect) <- s2_flush_dup zip s3_flush_dup zip s3_redirect_dup)
    s2_flush := s3_flush || s3_redirect
  for (((s1_flush, s2_flush), s2_redirect) <- s1_flush_dup zip s2_flush_dup zip s2_redirect_dup)
    s1_flush := s2_flush || s2_redirect

  s1_components_ready_dup.map(_ := predictors.io.s1_ready)
  for (((s1_ready, s1_fire), s1_valid) <- s1_ready_dup zip s1_fire_dup zip s1_valid_dup)
    s1_ready := s1_fire || !s1_valid
  for (((s0_fire, s1_components_ready), s1_ready) <- s0_fire_dup zip s1_components_ready_dup zip s1_ready_dup)
    s0_fire := s1_components_ready && s1_ready
  predictors.io.s0_fire := s0_fire_dup

  s2_components_ready_dup.map(_ := predictors.io.s2_ready)
  for (((s2_ready, s2_fire), s2_valid) <- s2_ready_dup zip s2_fire_dup zip s2_valid_dup)
    s2_ready := s2_fire || !s2_valid
  for ((((s1_fire, s2_components_ready), s2_ready), s1_valid) <- s1_fire_dup zip s2_components_ready_dup zip s2_ready_dup zip s1_valid_dup)
    s1_fire := s1_valid && s2_components_ready && s2_ready && io.bpu_to_ftq.resp.ready

  s3_components_ready_dup.map(_ := predictors.io.s3_ready)
  for (((s3_ready, s3_fire), s3_valid) <- s3_ready_dup zip s3_fire_dup zip s3_valid_dup)
    s3_ready := s3_fire || !s3_valid
  for ((((s2_fire, s3_components_ready), s3_ready), s2_valid) <- s2_fire_dup zip s3_components_ready_dup zip s3_ready_dup zip s2_valid_dup)
    s2_fire := s2_valid && s3_components_ready && s3_ready

  for ((((s0_fire, s1_flush), s1_fire), s1_valid) <- s0_fire_dup zip s1_flush_dup zip s1_fire_dup zip s1_valid_dup) {
    when (redirect_req.valid) { s1_valid := false.B }
      .elsewhen(s0_fire)      { s1_valid := true.B  }
      .elsewhen(s1_flush)     { s1_valid := false.B }
      .elsewhen(s1_fire)      { s1_valid := false.B }
  }
  predictors.io.s1_fire := s1_fire_dup

  s2_fire_dup := s2_valid_dup

  for (((((s1_fire, s2_flush), s2_fire), s2_valid), s1_flush) <-
    s1_fire_dup zip s2_flush_dup zip s2_fire_dup zip s2_valid_dup zip s1_flush_dup) {

    when (s2_flush)      { s2_valid := false.B   }
      .elsewhen(s1_fire) { s2_valid := !s1_flush }
      .elsewhen(s2_fire) { s2_valid := false.B   }
  }

  predictors.io.s2_fire := s2_fire_dup
  predictors.io.s2_redirect := s2_redirect_dup

  s3_fire_dup := s3_valid_dup

  for (((((s2_fire, s3_flush), s3_fire), s3_valid), s2_flush) <-
    s2_fire_dup zip s3_flush_dup zip s3_fire_dup zip s3_valid_dup zip s2_flush_dup) {

    when (s3_flush)      { s3_valid := false.B   }
      .elsewhen(s2_fire) { s3_valid := !s2_flush }
      .elsewhen(s3_fire) { s3_valid := false.B   }
  }

  predictors.io.s3_fire := s3_fire_dup
  predictors.io.s3_redirect := s3_redirect_dup
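
  // Handshake summary (as wired above): s1 fires when it is valid, s2 can
  // accept (components ready && ready) and the FTQ has space. s2/s3 fire
  // whenever they are valid: the bulk connections s2_fire_dup := s2_valid_dup
  // and s3_fire_dup := s3_valid_dup are the last connects and thus take
  // priority. A stage's valid is set by the previous stage's fire and
  // cleared on flush or on firing without being refilled.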

  io.bpu_to_ftq.resp.valid :=
    s1_valid_dup(2) && s2_components_ready_dup(2) && s2_ready_dup(2) ||
    s2_fire_dup(2) && s2_redirect_dup(2) ||
    s3_fire_dup(2) && s3_redirect_dup(2)
  io.bpu_to_ftq.resp.bits  := predictors.io.out
  io.bpu_to_ftq.resp.bits.last_stage_spec_info.histPtr     := s3_ghist_ptr_dup(2)

  val full_pred_diff = WireInit(false.B)
  val full_pred_diff_stage = WireInit(0.U)
  val full_pred_diff_offset = WireInit(0.U)
  for (i <- 0 until numDup - 1) {
    when (io.bpu_to_ftq.resp.valid &&
      ((io.bpu_to_ftq.resp.bits.s1.full_pred(i).asTypeOf(UInt()) =/= io.bpu_to_ftq.resp.bits.s1.full_pred(i + 1).asTypeOf(UInt()) && io.bpu_to_ftq.resp.bits.s1.full_pred(i).hit) ||
          (io.bpu_to_ftq.resp.bits.s2.full_pred(i).asTypeOf(UInt()) =/= io.bpu_to_ftq.resp.bits.s2.full_pred(i + 1).asTypeOf(UInt()) && io.bpu_to_ftq.resp.bits.s2.full_pred(i).hit) ||
          (io.bpu_to_ftq.resp.bits.s3.full_pred(i).asTypeOf(UInt()) =/= io.bpu_to_ftq.resp.bits.s3.full_pred(i + 1).asTypeOf(UInt()) && io.bpu_to_ftq.resp.bits.s3.full_pred(i).hit))) {
      full_pred_diff := true.B
      full_pred_diff_offset := i.U
      when (io.bpu_to_ftq.resp.bits.s1.full_pred(i).asTypeOf(UInt()) =/= io.bpu_to_ftq.resp.bits.s1.full_pred(i + 1).asTypeOf(UInt())) {
        full_pred_diff_stage := 1.U
      } .elsewhen (io.bpu_to_ftq.resp.bits.s2.full_pred(i).asTypeOf(UInt()) =/= io.bpu_to_ftq.resp.bits.s2.full_pred(i + 1).asTypeOf(UInt())) {
        full_pred_diff_stage := 2.U
      } .otherwise {
        full_pred_diff_stage := 3.U
      }
    }
  }
  XSError(full_pred_diff, "Full prediction difference detected!")

  // s0_stall should be exclusive with any other PC source
  s0_stall_dup.zip(s1_valid_dup).zip(s2_redirect_dup).zip(s3_redirect_dup).zip(do_redirect_dup).foreach {
    case ((((s0_stall, s1_valid), s2_redirect), s3_redirect), do_redirect) => {
      s0_stall := !(s1_valid || s2_redirect || s3_redirect || do_redirect.valid)
    }
  }
  // Power-on reset
  val powerOnResetState = RegInit(true.B)
  when(s0_fire_dup(0)) {
    // When the BPU pipeline fires for the first time, power-on reset is considered done
    powerOnResetState := false.B
  }
  XSError(!powerOnResetState && s0_stall_dup(0) && s0_pc_dup(0) =/= s0_pc_reg_dup(0), "s0_stall but s0_pc is different from s0_pc_reg")

  npcGen_dup.zip(s0_pc_reg_dup).map{ case (gen, reg) =>
    gen.register(true.B, reg, Some("stallPC"), 0)}
  foldedGhGen_dup.zip(s0_folded_gh_reg_dup).map{ case (gen, reg) =>
    gen.register(true.B, reg, Some("stallFGH"), 0)}
  ghistPtrGen_dup.zip(s0_ghist_ptr_reg_dup).map{ case (gen, reg) =>
    gen.register(true.B, reg, Some("stallGHPtr"), 0)}
  lastBrNumOHGen_dup.zip(s0_last_br_num_oh_reg_dup).map{ case (gen, reg) =>
    gen.register(true.B, reg, Some("stallBrNumOH"), 0)}
  aheadFhObGen_dup.zip(s0_ahead_fh_oldest_bits_reg_dup).map{ case (gen, reg) =>
    gen.register(true.B, reg, Some("stallAFHOB"), 0)}

  // Assign pred cycle for profiling
  io.bpu_to_ftq.resp.bits.s1.full_pred.map(_.predCycle.map(_ := GTimer()))
  io.bpu_to_ftq.resp.bits.s2.full_pred.map(_.predCycle.map(_ := GTimer()))
  io.bpu_to_ftq.resp.bits.s3.full_pred.map(_.predCycle.map(_ := GTimer()))

  // History management
  // s1
  val s1_possible_predicted_ghist_ptrs_dup = s1_ghist_ptr_dup.map(ptr => (0 to numBr).map(ptr - _.U))
  val s1_predicted_ghist_ptr_dup = s1_possible_predicted_ghist_ptrs_dup.zip(resp.s1.lastBrPosOH).map{ case (ptr, oh) => Mux1H(oh, ptr)}
  val s1_possible_predicted_fhs_dup =
    for (((((fgh, afh), br_num_oh), t), br_pos_oh) <-
      s1_folded_gh_dup zip s1_ahead_fh_oldest_bits_dup zip s1_last_br_num_oh_dup zip resp.s1.brTaken zip resp.s1.lastBrPosOH)
      yield (0 to numBr).map(i =>
        fgh.update(afh, br_num_oh, i, t & br_pos_oh(i))
      )
  val s1_predicted_fh_dup = resp.s1.lastBrPosOH.zip(s1_possible_predicted_fhs_dup).map{ case (oh, fh) => Mux1H(oh, fh)}

  val s1_ahead_fh_ob_src_dup = dup_wire(new AllAheadFoldedHistoryOldestBits(foldedGHistInfos))
  s1_ahead_fh_ob_src_dup.zip(s1_ghist_ptr_dup).map{ case (src, ptr) => src.read(ghv, ptr)}

  if (EnableGHistDiff) {
    val s1_predicted_ghist = WireInit(getHist(s1_predicted_ghist_ptr_dup(0)).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when (resp.s1.shouldShiftVec(0)(i)) {
        s1_predicted_ghist(i) := resp.s1.brTaken(0) && (i == 0).B
      }
    }
    when (s1_valid_dup(0)) {
      s0_ghist := s1_predicted_ghist.asUInt
    }
  }

  val s1_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b => (s1_ghist_ptr_dup(0)).value === (CGHPtr(false.B, n.U) + b.U).value && resp.s1.shouldShiftVec(0)(b) && s1_valid_dup(0)))
  val s1_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b => (
        (s1_ghist_ptr_dup(0)).value === (CGHPtr(false.B, n.U) + b.U).value && resp.s1.shouldShiftVec(0)(b),
        resp.s1.brTaken(0) && resp.s1.lastBrPosOH(0)(b + 1)
      ))
    )
  )
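
  // The global history vector is maintained bit-by-bit: for each history bit
  // n, a write fires when the s1 pointer offset by branch slot b lands on n
  // and slot b shifts this cycle; Mux1H then selects the taken bit to write.
  // The same per-bit scheme is reused for s2/s3 overrides and redirect
  // recovery below, arbitrated through the ghvBitWriteGens generators.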

  for (((npcGen, s1_valid), s1_target) <- npcGen_dup zip s1_valid_dup zip resp.s1.getTarget)
    npcGen.register(s1_valid, s1_target, Some("s1_target"), 4)
  for (((foldedGhGen, s1_valid), s1_predicted_fh) <- foldedGhGen_dup zip s1_valid_dup zip s1_predicted_fh_dup)
    foldedGhGen.register(s1_valid, s1_predicted_fh, Some("s1_FGH"), 4)
  for (((ghistPtrGen, s1_valid), s1_predicted_ghist_ptr) <- ghistPtrGen_dup zip s1_valid_dup zip s1_predicted_ghist_ptr_dup)
    ghistPtrGen.register(s1_valid, s1_predicted_ghist_ptr, Some("s1_GHPtr"), 4)
  for (((lastBrNumOHGen, s1_valid), s1_brPosOH) <- lastBrNumOHGen_dup zip s1_valid_dup zip resp.s1.lastBrPosOH.map(_.asUInt))
    lastBrNumOHGen.register(s1_valid, s1_brPosOH, Some("s1_BrNumOH"), 4)
  for (((aheadFhObGen, s1_valid), s1_ahead_fh_ob_src) <- aheadFhObGen_dup zip s1_valid_dup zip s1_ahead_fh_ob_src_dup)
    aheadFhObGen.register(s1_valid, s1_ahead_fh_ob_src, Some("s1_AFHOB"), 4)
  ghvBitWriteGens.zip(s1_ghv_wens).zipWithIndex.map{case ((b, w), i) =>
    b.register(w.reduce(_||_), s1_ghv_wdatas(i), Some(s"s1_new_bit_$i"), 4)
  }

  class PreviousPredInfo extends Bundle {
    val hit = Vec(numDup, Bool())
    val target = Vec(numDup, UInt(VAddrBits.W))
    val lastBrPosOH = Vec(numDup, Vec(numBr + 1, Bool()))
    val taken = Vec(numDup, Bool())
    val takenMask = Vec(numDup, Vec(numBr, Bool()))
    val cfiIndex = Vec(numDup, UInt(log2Ceil(PredictWidth).W))
  }

  def preds_needs_redirect_vec_dup(x: PreviousPredInfo, y: BranchPredictionBundle) = {
    // Timing optimization:
    // we first compare all targets with the previous stage's target, then
    // select the difference by taken & hit. The target is usually generated
    // earlier than taken, so comparing targets before the select helps timing.
    val targetDiffVec: IndexedSeq[Vec[Bool]] =
      x.target.zip(y.getAllTargets).map {
        case (xTarget, yAllTarget) => VecInit(yAllTarget.map(_ =/= xTarget))
      } // [numDup][all target comparisons]
    val targetDiff   : IndexedSeq[Bool]      =
      targetDiffVec.zip(x.hit).zip(x.takenMask).map {
        case ((diff, hit), takenMask) => selectByTaken(takenMask, hit, diff)
      } // [numDup]

    val lastBrPosOHDiff: IndexedSeq[Bool]      = x.lastBrPosOH.zip(y.lastBrPosOH).map { case (oh1, oh2) => oh1.asUInt =/= oh2.asUInt }
    val takenDiff      : IndexedSeq[Bool]      = x.taken.zip(y.taken).map { case (t1, t2) => t1 =/= t2 }
    val takenOffsetDiff: IndexedSeq[Bool]      = x.cfiIndex.zip(y.cfiIndex).zip(x.taken).zip(y.taken).map { case (((i1, i2), xt), yt) => xt && yt && i1 =/= i2.bits }
    VecInit(
      for ((((tgtd, lbpohd), tkd), tod) <-
             targetDiff zip lastBrPosOHDiff zip takenDiff zip takenOffsetDiff)
      yield VecInit(tgtd, lbpohd, tkd, tod)
      // x.shouldShiftVec.asUInt =/= y.shouldShiftVec.asUInt,
      // x.brTaken =/= y.brTaken
    )
  }
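
  // The per-dup result carries four difference flags, matching the perf
  // counters below: (0) selected target differs, (1) last branch position
  // (lastBrPosOH) differs, (2) taken direction differs, (3) both stages
  // taken but at different cfi offsets. Any set flag triggers an s2 redirect.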

  // s2
  val s2_possible_predicted_ghist_ptrs_dup = s2_ghist_ptr_dup.map(ptr => (0 to numBr).map(ptr - _.U))
  val s2_predicted_ghist_ptr_dup = s2_possible_predicted_ghist_ptrs_dup.zip(resp.s2.lastBrPosOH).map{ case (ptr, oh) => Mux1H(oh, ptr)}

  val s2_possible_predicted_fhs_dup =
    for ((((fgh, afh), br_num_oh), full_pred) <-
      s2_folded_gh_dup zip s2_ahead_fh_oldest_bits_dup zip s2_last_br_num_oh_dup zip resp.s2.full_pred)
      yield (0 to numBr).map(i =>
        fgh.update(afh, br_num_oh, i, if (i > 0) full_pred.br_taken_mask(i - 1) else false.B)
      )
  val s2_predicted_fh_dup = resp.s2.lastBrPosOH.zip(s2_possible_predicted_fhs_dup).map{ case (oh, fh) => Mux1H(oh, fh)}

  val s2_ahead_fh_ob_src_dup = dup_wire(new AllAheadFoldedHistoryOldestBits(foldedGHistInfos))
  s2_ahead_fh_ob_src_dup.zip(s2_ghist_ptr_dup).map{ case (src, ptr) => src.read(ghv, ptr)}

  if (EnableGHistDiff) {
    val s2_predicted_ghist = WireInit(getHist(s2_predicted_ghist_ptr_dup(0)).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when (resp.s2.shouldShiftVec(0)(i)) {
        s2_predicted_ghist(i) := resp.s2.brTaken(0) && (i == 0).B
      }
    }
    when(s2_redirect_dup(0)) {
      s0_ghist := s2_predicted_ghist.asUInt
    }
  }

  val s2_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b => (s2_ghist_ptr_dup(0)).value === (CGHPtr(false.B, n.U) + b.U).value && resp.s2.shouldShiftVec(0)(b) && s2_redirect_dup(0)))
  val s2_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b => (
        (s2_ghist_ptr_dup(0)).value === (CGHPtr(false.B, n.U) + b.U).value && resp.s2.shouldShiftVec(0)(b),
        resp.s2.full_pred(0).real_br_taken_mask()(b)
      ))
    )
  )

  val s1_pred_info = Wire(new PreviousPredInfo)
  s1_pred_info.hit := resp.s1.full_pred.map(_.hit)
  s1_pred_info.target := resp.s1.getTarget
  s1_pred_info.lastBrPosOH := resp.s1.lastBrPosOH
  s1_pred_info.taken := resp.s1.taken
  s1_pred_info.takenMask := resp.s1.full_pred.map(_.taken_mask_on_slot)
  s1_pred_info.cfiIndex := resp.s1.cfiIndex.map { case x => x.bits }

  val previous_s1_pred_info = RegEnable(s1_pred_info, 0.U.asTypeOf(new PreviousPredInfo), s1_fire_dup(0))

  val s2_redirect_s1_last_pred_vec_dup = preds_needs_redirect_vec_dup(previous_s1_pred_info, resp.s2)

  for (((s2_redirect, s2_fire), s2_redirect_s1_last_pred_vec) <- s2_redirect_dup zip s2_fire_dup zip s2_redirect_s1_last_pred_vec_dup)
    s2_redirect := s2_fire && s2_redirect_s1_last_pred_vec.reduce(_||_)

  for (((npcGen, s2_redirect), s2_target) <- npcGen_dup zip s2_redirect_dup zip resp.s2.getTarget)
    npcGen.register(s2_redirect, s2_target, Some("s2_target"), 5)
  for (((foldedGhGen, s2_redirect), s2_predicted_fh) <- foldedGhGen_dup zip s2_redirect_dup zip s2_predicted_fh_dup)
    foldedGhGen.register(s2_redirect, s2_predicted_fh, Some("s2_FGH"), 5)
  for (((ghistPtrGen, s2_redirect), s2_predicted_ghist_ptr) <- ghistPtrGen_dup zip s2_redirect_dup zip s2_predicted_ghist_ptr_dup)
    ghistPtrGen.register(s2_redirect, s2_predicted_ghist_ptr, Some("s2_GHPtr"), 5)
  for (((lastBrNumOHGen, s2_redirect), s2_brPosOH) <- lastBrNumOHGen_dup zip s2_redirect_dup zip resp.s2.lastBrPosOH.map(_.asUInt))
    lastBrNumOHGen.register(s2_redirect, s2_brPosOH, Some("s2_BrNumOH"), 5)
  for (((aheadFhObGen, s2_redirect), s2_ahead_fh_ob_src) <- aheadFhObGen_dup zip s2_redirect_dup zip s2_ahead_fh_ob_src_dup)
    aheadFhObGen.register(s2_redirect, s2_ahead_fh_ob_src, Some("s2_AFHOB"), 5)
  ghvBitWriteGens.zip(s2_ghv_wens).zipWithIndex.map{case ((b, w), i) =>
    b.register(w.reduce(_||_), s2_ghv_wdatas(i), Some(s"s2_new_bit_$i"), 5)
  }

  XSPerfAccumulate("s2_redirect_because_target_diff", s2_fire_dup(0) && s2_redirect_s1_last_pred_vec_dup(0)(0))
  XSPerfAccumulate("s2_redirect_because_branch_num_diff", s2_fire_dup(0) && s2_redirect_s1_last_pred_vec_dup(0)(1))
  XSPerfAccumulate("s2_redirect_because_direction_diff", s2_fire_dup(0) && s2_redirect_s1_last_pred_vec_dup(0)(2))
  XSPerfAccumulate("s2_redirect_because_cfi_idx_diff", s2_fire_dup(0) && s2_redirect_s1_last_pred_vec_dup(0)(3))
  // XSPerfAccumulate("s2_redirect_because_shouldShiftVec_diff", s2_fire && s2_redirect_s1_last_pred_vec(4))
  // XSPerfAccumulate("s2_redirect_because_brTaken_diff", s2_fire && s2_redirect_s1_last_pred_vec(5))
  XSPerfAccumulate("s2_redirect_because_fallThroughError", s2_fire_dup(0) && resp.s2.fallThruError(0))

  XSPerfAccumulate("s2_redirect_when_taken", s2_redirect_dup(0) && resp.s2.taken(0) && resp.s2.full_pred(0).hit)
  XSPerfAccumulate("s2_redirect_when_not_taken", s2_redirect_dup(0) && !resp.s2.taken(0) && resp.s2.full_pred(0).hit)
  XSPerfAccumulate("s2_redirect_when_not_hit", s2_redirect_dup(0) && !resp.s2.full_pred(0).hit)

  // s3
  val s3_possible_predicted_ghist_ptrs_dup = s3_ghist_ptr_dup.map(ptr => (0 to numBr).map(ptr - _.U))
  val s3_predicted_ghist_ptr_dup = s3_possible_predicted_ghist_ptrs_dup.zip(resp.s3.lastBrPosOH).map{ case (ptr, oh) => Mux1H(oh, ptr)}

  val s3_possible_predicted_fhs_dup =
    for ((((fgh, afh), br_num_oh), full_pred) <-
      s3_folded_gh_dup zip s3_ahead_fh_oldest_bits_dup zip s3_last_br_num_oh_dup zip resp.s3.full_pred)
      yield (0 to numBr).map(i =>
        fgh.update(afh, br_num_oh, i, if (i > 0) full_pred.br_taken_mask(i - 1) else false.B)
      )
  val s3_predicted_fh_dup = resp.s3.lastBrPosOH.zip(s3_possible_predicted_fhs_dup).map{ case (oh, fh) => Mux1H(oh, fh)}

  val s3_ahead_fh_ob_src_dup = dup_wire(new AllAheadFoldedHistoryOldestBits(foldedGHistInfos))
  s3_ahead_fh_ob_src_dup.zip(s3_ghist_ptr_dup).map{ case (src, ptr) => src.read(ghv, ptr)}

  if (EnableGHistDiff) {
    val s3_predicted_ghist = WireInit(getHist(s3_predicted_ghist_ptr_dup(0)).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when (resp.s3.shouldShiftVec(0)(i)) {
        s3_predicted_ghist(i) := resp.s3.brTaken(0) && (i == 0).B
      }
    }
    when(s3_redirect_dup(0)) {
      s0_ghist := s3_predicted_ghist.asUInt
    }
  }

  val s3_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b => (s3_ghist_ptr_dup(0)).value === (CGHPtr(false.B, n.U) + b.U).value && resp.s3.shouldShiftVec(0)(b) && s3_redirect_dup(0)))
  val s3_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b => (
        (s3_ghist_ptr_dup(0)).value === (CGHPtr(false.B, n.U) + b.U).value && resp.s3.shouldShiftVec(0)(b),
        resp.s3.full_pred(0).real_br_taken_mask()(b)
      ))
    )
  )

  val previous_s2_pred = RegEnable(resp.s2, 0.U.asTypeOf(resp.s2), s2_fire_dup(0))

  val s3_redirect_on_br_taken_dup = resp.s3.full_pred.zip(previous_s2_pred.full_pred).map {case (fp1, fp2) => fp1.real_br_taken_mask().asUInt =/= fp2.real_br_taken_mask().asUInt}
  val s3_both_first_taken_dup = resp.s3.full_pred.zip(previous_s2_pred.full_pred).map {case (fp1, fp2) => fp1.real_br_taken_mask()(0) && fp2.real_br_taken_mask()(0)}
  val s3_redirect_on_target_dup = resp.s3.getTarget.zip(previous_s2_pred.getTarget).map {case (t1, t2) => t1 =/= t2}
  val s3_redirect_on_jalr_target_dup = resp.s3.full_pred.zip(previous_s2_pred.full_pred).map {case (fp1, fp2) => fp1.hit_taken_on_jalr && fp1.jalr_target =/= fp2.jalr_target}
  val s3_redirect_on_fall_thru_error_dup = resp.s3.fallThruError
  val s3_redirect_on_ftb_multi_hit_dup = resp.s3.ftbMultiHit

  for (((((((s3_redirect, s3_fire), s3_redirect_on_br_taken), s3_redirect_on_target), s3_redirect_on_fall_thru_error), s3_redirect_on_ftb_multi_hit), s3_both_first_taken) <-
    s3_redirect_dup zip s3_fire_dup zip s3_redirect_on_br_taken_dup zip s3_redirect_on_target_dup zip s3_redirect_on_fall_thru_error_dup zip s3_redirect_on_ftb_multi_hit_dup zip s3_both_first_taken_dup) {

    s3_redirect := s3_fire && (
      (s3_redirect_on_br_taken && !s3_both_first_taken) || s3_redirect_on_target || s3_redirect_on_fall_thru_error || s3_redirect_on_ftb_multi_hit
    )
  }

  XSPerfAccumulate("s3_redirect_on_br_taken", s3_fire_dup(0) && s3_redirect_on_br_taken_dup(0))
  XSPerfAccumulate("s3_redirect_on_jalr_target", s3_fire_dup(0) && s3_redirect_on_jalr_target_dup(0))
  XSPerfAccumulate("s3_redirect_on_others", s3_redirect_dup(0) && !(s3_redirect_on_br_taken_dup(0) || s3_redirect_on_jalr_target_dup(0)))

  for (((npcGen, s3_redirect), s3_target) <- npcGen_dup zip s3_redirect_dup zip resp.s3.getTarget)
    npcGen.register(s3_redirect, s3_target, Some("s3_target"), 3)
  for (((foldedGhGen, s3_redirect), s3_predicted_fh) <- foldedGhGen_dup zip s3_redirect_dup zip s3_predicted_fh_dup)
    foldedGhGen.register(s3_redirect, s3_predicted_fh, Some("s3_FGH"), 3)
  for (((ghistPtrGen, s3_redirect), s3_predicted_ghist_ptr) <- ghistPtrGen_dup zip s3_redirect_dup zip s3_predicted_ghist_ptr_dup)
    ghistPtrGen.register(s3_redirect, s3_predicted_ghist_ptr, Some("s3_GHPtr"), 3)
  for (((lastBrNumOHGen, s3_redirect), s3_brPosOH) <- lastBrNumOHGen_dup zip s3_redirect_dup zip resp.s3.lastBrPosOH.map(_.asUInt))
    lastBrNumOHGen.register(s3_redirect, s3_brPosOH, Some("s3_BrNumOH"), 3)
  for (((aheadFhObGen, s3_redirect), s3_ahead_fh_ob_src) <- aheadFhObGen_dup zip s3_redirect_dup zip s3_ahead_fh_ob_src_dup)
    aheadFhObGen.register(s3_redirect, s3_ahead_fh_ob_src, Some("s3_AFHOB"), 3)
  ghvBitWriteGens.zip(s3_ghv_wens).zipWithIndex.map{case ((b, w), i) =>
    b.register(w.reduce(_||_), s3_ghv_wdatas(i), Some(s"s3_new_bit_$i"), 3)
  }

  // Send signals to tell the FTQ about overrides
  val s2_ftq_idx = RegEnable(io.ftq_to_bpu.enq_ptr, s1_fire_dup(0))
  val s3_ftq_idx = RegEnable(s2_ftq_idx, s2_fire_dup(0))

  for (((to_ftq_s1_valid, s1_fire), s1_flush) <- io.bpu_to_ftq.resp.bits.s1.valid zip s1_fire_dup zip s1_flush_dup) {
    to_ftq_s1_valid := s1_fire && !s1_flush
  }
  io.bpu_to_ftq.resp.bits.s1.hasRedirect.map(_ := false.B)
  io.bpu_to_ftq.resp.bits.s1.ftq_idx := DontCare
  for (((to_ftq_s2_valid, s2_fire), s2_flush) <- io.bpu_to_ftq.resp.bits.s2.valid zip s2_fire_dup zip s2_flush_dup) {
    to_ftq_s2_valid := s2_fire && !s2_flush
  }
  io.bpu_to_ftq.resp.bits.s2.hasRedirect.zip(s2_redirect_dup).map {case (hr, r) => hr := r}
  io.bpu_to_ftq.resp.bits.s2.ftq_idx := s2_ftq_idx
  for (((to_ftq_s3_valid, s3_fire), s3_flush) <- io.bpu_to_ftq.resp.bits.s3.valid zip s3_fire_dup zip s3_flush_dup) {
    to_ftq_s3_valid := s3_fire && !s3_flush
  }
  io.bpu_to_ftq.resp.bits.s3.hasRedirect.zip(s3_redirect_dup).map {case (hr, r) => hr := r}
  io.bpu_to_ftq.resp.bits.s3.ftq_idx := s3_ftq_idx

  predictors.io.update.valid := RegNext(io.ftq_to_bpu.update.valid, init = false.B)
  predictors.io.update.bits := RegEnable(io.ftq_to_bpu.update.bits, io.ftq_to_bpu.update.valid)
  predictors.io.update.bits.ghist := RegEnable(
    getHist(io.ftq_to_bpu.update.bits.spec_info.histPtr), io.ftq_to_bpu.update.valid)

  val redirect_dup = do_redirect_dup.map(_.bits)
  predictors.io.redirect := do_redirect_dup(0)

  // Redirect logic
  val shift_dup = redirect_dup.map(_.cfiUpdate.shift)
  val addIntoHist_dup = redirect_dup.map(_.cfiUpdate.addIntoHist)
  // TODO: remove these below
  val shouldShiftVec_dup = shift_dup.map(shift => Mux(shift === 0.U, VecInit(0.U((1 << (log2Ceil(numBr) + 1)).W).asBools), VecInit((LowerMask(1.U << (shift - 1.U))).asBools)))
  // TODO end
  val afhob_dup = redirect_dup.map(_.cfiUpdate.afhob)
  val lastBrNumOH_dup = redirect_dup.map(_.cfiUpdate.lastBrNumOH)

  val isBr_dup = redirect_dup.map(_.cfiUpdate.pd.isBr)
  val taken_dup = redirect_dup.map(_.cfiUpdate.taken)
  val real_br_taken_mask_dup =
    for (((shift, taken), addIntoHist) <- shift_dup zip taken_dup zip addIntoHist_dup)
      yield (0 until numBr).map(i => shift === (i + 1).U && taken && addIntoHist)

  val oldPtr_dup = redirect_dup.map(_.cfiUpdate.histPtr)
  val updated_ptr_dup = oldPtr_dup.zip(shift_dup).map {case (oldPtr, shift) => oldPtr - shift}
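
  // On a redirect the speculative history is rolled back: the recovered
  // pointer is the checkpointed histPtr moved by `shift` (the number of
  // branch slots being re-recorded), and the folded histories and
  // ahead-oldest bits are recomputed from ghv at the old pointer below.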
  def computeFoldedHist(hist: UInt, compLen: Int)(histLen: Int): UInt = {
    if (histLen > 0) {
      val nChunks     = (histLen + compLen - 1) / compLen
      val hist_chunks = (0 until nChunks) map { i =>
        hist(min((i + 1) * compLen, histLen) - 1, i * compLen)
      }
      ParallelXOR(hist_chunks)
    }
    else 0.U
  }
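
  // Illustrative example (not from the source): computeFoldedHist(hist, 4)(10)
  // XORs the chunks hist(3,0), hist(7,4) and hist(9,8), mirroring foldTag in
  // BPUUtils. It is used to rebuild folded histories from scratch on redirect
  // and in the commit-time history checker.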

  val oldFh_dup = dup_seq(WireInit(0.U.asTypeOf(new AllFoldedHistories(foldedGHistInfos))))
  oldFh_dup.zip(oldPtr_dup).map { case (oldFh, oldPtr) =>
      foldedGHistInfos.foreach { case (histLen, compLen) =>
        oldFh.getHistWithInfo((histLen, compLen)).folded_hist := computeFoldedHist(getHist(oldPtr), compLen)(histLen)
      }
  }

  val updated_fh_dup =
    for (((((oldFh, oldPtr), taken), addIntoHist), shift) <-
      oldFh_dup zip oldPtr_dup zip taken_dup zip addIntoHist_dup zip shift_dup)
    yield VecInit((0 to numBr).map(i => oldFh.update(ghv, oldPtr, i, taken && addIntoHist)))(shift)
  val thisBrNumOH_dup = shift_dup.map(shift => UIntToOH(shift, numBr + 1))
  val thisAheadFhOb_dup = dup_wire(new AllAheadFoldedHistoryOldestBits(foldedGHistInfos))
  thisAheadFhOb_dup.zip(oldPtr_dup).map {case (afhob, oldPtr) => afhob.read(ghv, oldPtr)}
  val redirect_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b => oldPtr_dup(0).value === (CGHPtr(false.B, n.U) + b.U).value && shouldShiftVec_dup(0)(b) && do_redirect_dup(0).valid))
  val redirect_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b => oldPtr_dup(0).value === (CGHPtr(false.B, n.U) + b.U).value && shouldShiftVec_dup(0)(b)),
      real_br_taken_mask_dup(0)
    )
  )

  if (EnableGHistDiff) {
    val updated_ghist = WireInit(getHist(updated_ptr_dup(0)).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when (shift_dup(0) >= (i + 1).U) {
        updated_ghist(i) := taken_dup(0) && addIntoHist_dup(0) && (i == 0).B
      }
    }
    when(do_redirect_dup(0).valid) {
      s0_ghist := updated_ghist.asUInt
    }
  }

  // Commit-time history checker
  if (EnableCommitGHistDiff) {
    val commitGHist = RegInit(0.U.asTypeOf(Vec(HistoryLength, Bool())))
    val commitGHistPtr = RegInit(0.U.asTypeOf(new CGHPtr))
    def getCommitHist(ptr: CGHPtr): UInt =
      (Cat(commitGHist.asUInt, commitGHist.asUInt) >> (ptr.value + 1.U))(HistoryLength - 1, 0)

    val updateValid        : Bool      = io.ftq_to_bpu.update.valid
    val branchValidMask    : UInt      = io.ftq_to_bpu.update.bits.ftb_entry.brValids.asUInt
    val branchCommittedMask: Vec[Bool] = io.ftq_to_bpu.update.bits.br_committed
    val misPredictMask     : UInt      = io.ftq_to_bpu.update.bits.mispred_mask.asUInt
    val takenMask          : UInt      =
      io.ftq_to_bpu.update.bits.br_taken_mask.asUInt |
        io.ftq_to_bpu.update.bits.ftb_entry.always_taken.asUInt // Always-taken branches are recorded in history
    val takenIdx       : UInt = (PriorityEncoder(takenMask) + 1.U((log2Ceil(numBr) + 1).W)).asUInt
    val misPredictIdx  : UInt = (PriorityEncoder(misPredictMask) + 1.U((log2Ceil(numBr) + 1).W)).asUInt
    val shouldShiftMask: UInt = Mux(takenMask.orR,
        LowerMask(takenIdx).asUInt,
        ((1 << numBr) - 1).asUInt) &
      Mux(misPredictMask.orR,
        LowerMask(misPredictIdx).asUInt,
        ((1 << numBr) - 1).asUInt) &
      branchCommittedMask.asUInt
    val updateShift    : UInt   =
      Mux(updateValid && branchValidMask.orR, PopCount(branchValidMask & shouldShiftMask), 0.U)
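
    // Worked example (not from the source), numBr = 2: if slot 0 is taken
    // (takenMask = 0b01), takenIdx = 1 and LowerMask(1) = 0b01, so only the
    // first slot may shift into history; with no taken and no mispredict both
    // Mux arms give the all-ones mask 0b11, and every committed valid branch
    // shifts (updateShift = PopCount of the surviving mask).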

    // Maintain the commitGHist
    for (i <- 0 until numBr) {
      when(updateShift >= (i + 1).U) {
        val ptr: CGHPtr = commitGHistPtr - i.asUInt
        commitGHist(ptr.value) := takenMask(i)
      }
    }
    when(updateValid) {
      commitGHistPtr := commitGHistPtr - updateShift
    }

    // Calculate the true folded history using parallel XOR and compare it
    // against the predict-time folded history (differential check)
    TageTableInfos.map {
      case (nRows, histLen, _) => {
        val nRowsPerBr = nRows / numBr
        val predictGHistPtr = io.ftq_to_bpu.update.bits.spec_info.histPtr
        val commitTrueHist: UInt = computeFoldedHist(getCommitHist(commitGHistPtr), log2Ceil(nRowsPerBr))(histLen)
        val predictFHist  : UInt = computeFoldedHist(getHist(predictGHistPtr), log2Ceil(nRowsPerBr))(histLen)
        XSWarn(updateValid && predictFHist =/= commitTrueHist,
          p"predict time ghist: ${predictFHist} is different from commit time: ${commitTrueHist}\n")
      }
    }
  }

  // val updatedGh = oldGh.update(shift, taken && addIntoHist)
  for ((npcGen, do_redirect) <- npcGen_dup zip do_redirect_dup)
    npcGen.register(do_redirect.valid, do_redirect.bits.cfiUpdate.target, Some("redirect_target"), 2)
  for (((foldedGhGen, do_redirect), updated_fh) <- foldedGhGen_dup zip do_redirect_dup zip updated_fh_dup)
    foldedGhGen.register(do_redirect.valid, updated_fh, Some("redirect_FGHT"), 2)
  for (((ghistPtrGen, do_redirect), updated_ptr) <- ghistPtrGen_dup zip do_redirect_dup zip updated_ptr_dup)
    ghistPtrGen.register(do_redirect.valid, updated_ptr, Some("redirect_GHPtr"), 2)
  for (((lastBrNumOHGen, do_redirect), thisBrNumOH) <- lastBrNumOHGen_dup zip do_redirect_dup zip thisBrNumOH_dup)
    lastBrNumOHGen.register(do_redirect.valid, thisBrNumOH, Some("redirect_BrNumOH"), 2)
  for (((aheadFhObGen, do_redirect), thisAheadFhOb) <- aheadFhObGen_dup zip do_redirect_dup zip thisAheadFhOb_dup)
    aheadFhObGen.register(do_redirect.valid, thisAheadFhOb, Some("redirect_AFHOB"), 2)
  ghvBitWriteGens.zip(redirect_ghv_wens).zipWithIndex.map{case ((b, w), i) =>
    b.register(w.reduce(_||_), redirect_ghv_wdatas(i), Some(s"redirect_new_bit_$i"), 2)
  }
  // No need to assign s0_last_pred

  // val need_reset = RegNext(reset.asBool) && !reset.asBool

  // Reset
  // npcGen.register(need_reset, resetVector.U, Some("reset_pc"), 1)
  // foldedGhGen.register(need_reset, 0.U.asTypeOf(s0_folded_gh), Some("reset_FGH"), 1)
  // ghistPtrGen.register(need_reset, 0.U.asTypeOf(new CGHPtr), Some("reset_GHPtr"), 1)

  s0_pc_dup.zip(npcGen_dup).map {case (s0_pc, npcGen) => s0_pc := npcGen()}
  s0_folded_gh_dup.zip(foldedGhGen_dup).map {case (s0_folded_gh, foldedGhGen) => s0_folded_gh := foldedGhGen()}
  s0_ghist_ptr_dup.zip(ghistPtrGen_dup).map {case (s0_ghist_ptr, ghistPtrGen) => s0_ghist_ptr := ghistPtrGen()}
  s0_ahead_fh_oldest_bits_dup.zip(aheadFhObGen_dup).map {case (s0_ahead_fh_oldest_bits, aheadFhObGen) =>
    s0_ahead_fh_oldest_bits := aheadFhObGen()}
  s0_last_br_num_oh_dup.zip(lastBrNumOHGen_dup).map {case (s0_last_br_num_oh, lastBrNumOHGen) =>
    s0_last_br_num_oh := lastBrNumOHGen()}
  (ghv_write_datas zip ghvBitWriteGens).map{case (wd, d) => wd := d()}
  for (i <- 0 until HistoryLength) {
    ghv_wens(i) := Seq(s1_ghv_wens, s2_ghv_wens, s3_ghv_wens, redirect_ghv_wens).map(_(i).reduce(_||_)).reduce(_||_)
    when (ghv_wens(i)) {
      ghv(i) := ghv_write_datas(i)
    }
  }

  // TODO: signals for memVio and other redirects
  controlRedirectBubble := do_redirect_dup(0).valid && do_redirect_dup(0).bits.ControlRedirectBubble
  ControlBTBMissBubble := do_redirect_dup(0).bits.ControlBTBMissBubble
  TAGEMissBubble := do_redirect_dup(0).bits.TAGEMissBubble
  SCMissBubble := do_redirect_dup(0).bits.SCMissBubble
  ITTAGEMissBubble := do_redirect_dup(0).bits.ITTAGEMissBubble
  RASMissBubble := do_redirect_dup(0).bits.RASMissBubble

  memVioRedirectBubble := do_redirect_dup(0).valid && do_redirect_dup(0).bits.MemVioRedirectBubble
  otherRedirectBubble := do_redirect_dup(0).valid && do_redirect_dup(0).bits.OtherRedirectBubble
  btbMissBubble := do_redirect_dup(0).valid && do_redirect_dup(0).bits.BTBMissBubble
  overrideBubble(0) := s2_redirect_dup(0)
  overrideBubble(1) := s3_redirect_dup(0)
  ftqUpdateBubble(0) := !s1_components_ready_dup(0)
  ftqUpdateBubble(1) := !s2_components_ready_dup(0)
  ftqUpdateBubble(2) := !s3_components_ready_dup(0)
  ftqFullStall := !io.bpu_to_ftq.resp.ready
  io.bpu_to_ftq.resp.bits.topdown_info := topdown_stages(numOfStage - 1)

  // Top-down handling logic
  when (controlRedirectBubble) {
    /*
    for (i <- 0 until numOfStage)
      topdown_stages(i).reasons(TopDownCounters.ControlRedirectBubble.id) := true.B
    io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.ControlRedirectBubble.id) := true.B
    */
    when (ControlBTBMissBubble) {
      for (i <- 0 until numOfStage)
        topdown_stages(i).reasons(TopDownCounters.BTBMissBubble.id) := true.B
      io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.BTBMissBubble.id) := true.B
    } .elsewhen (TAGEMissBubble) {
      for (i <- 0 until numOfStage)
        topdown_stages(i).reasons(TopDownCounters.TAGEMissBubble.id) := true.B
      io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.TAGEMissBubble.id) := true.B
    } .elsewhen (SCMissBubble) {
      for (i <- 0 until numOfStage)
        topdown_stages(i).reasons(TopDownCounters.SCMissBubble.id) := true.B
      io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.SCMissBubble.id) := true.B
    } .elsewhen (ITTAGEMissBubble) {
      for (i <- 0 until numOfStage)
        topdown_stages(i).reasons(TopDownCounters.ITTAGEMissBubble.id) := true.B
      io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.ITTAGEMissBubble.id) := true.B
    } .elsewhen (RASMissBubble) {
      for (i <- 0 until numOfStage)
        topdown_stages(i).reasons(TopDownCounters.RASMissBubble.id) := true.B
      io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.RASMissBubble.id) := true.B
    }
  }
  when (memVioRedirectBubble) {
    for (i <- 0 until numOfStage)
      topdown_stages(i).reasons(TopDownCounters.MemVioRedirectBubble.id) := true.B
    io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.MemVioRedirectBubble.id) := true.B
  }
  when (otherRedirectBubble) {
    for (i <- 0 until numOfStage)
      topdown_stages(i).reasons(TopDownCounters.OtherRedirectBubble.id) := true.B
    io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.OtherRedirectBubble.id) := true.B
  }
  when (btbMissBubble) {
    for (i <- 0 until numOfStage)
      topdown_stages(i).reasons(TopDownCounters.BTBMissBubble.id) := true.B
    io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.BTBMissBubble.id) := true.B
  }

  for (i <- 0 until numOfStage) {
    if (i < numOfStage - overrideStage) {
      when (overrideBubble(i)) {
        for (j <- 0 to i)
          topdown_stages(j).reasons(TopDownCounters.OverrideBubble.id) := true.B
      }
    }
    if (i < numOfStage - ftqUpdateStage) {
      when (ftqUpdateBubble(i)) {
        topdown_stages(i).reasons(TopDownCounters.FtqUpdateBubble.id) := true.B
      }
    }
  }
  when (ftqFullStall) {
    topdown_stages(0).reasons(TopDownCounters.FtqFullStall.id) := true.B
  }

  XSError(isBefore(redirect_dup(0).cfiUpdate.histPtr, s3_ghist_ptr_dup(0)) && do_redirect_dup(0).valid,
    p"s3_ghist_ptr ${s3_ghist_ptr_dup(0)} exceeds redirect histPtr ${redirect_dup(0).cfiUpdate.histPtr}\n")
  XSError(isBefore(redirect_dup(0).cfiUpdate.histPtr, s2_ghist_ptr_dup(0)) && do_redirect_dup(0).valid,
    p"s2_ghist_ptr ${s2_ghist_ptr_dup(0)} exceeds redirect histPtr ${redirect_dup(0).cfiUpdate.histPtr}\n")
  XSError(isBefore(redirect_dup(0).cfiUpdate.histPtr, s1_ghist_ptr_dup(0)) && do_redirect_dup(0).valid,
    p"s1_ghist_ptr ${s1_ghist_ptr_dup(0)} exceeds redirect histPtr ${redirect_dup(0).cfiUpdate.histPtr}\n")

  XSDebug(RegNext(reset.asBool) && !reset.asBool, "Resetting...\n")
  XSDebug(io.ftq_to_bpu.update.valid, p"Update from ftq\n")
  XSDebug(io.ftq_to_bpu.redirect.valid, p"Redirect from ftq\n")

  XSDebug("[BP0]                 fire=%d                      pc=%x\n", s0_fire_dup(0), s0_pc_dup(0))
  XSDebug("[BP1] v=%d r=%d cr=%d fire=%d             flush=%d pc=%x\n",
    s1_valid_dup(0), s1_ready_dup(0), s1_components_ready_dup(0), s1_fire_dup(0), s1_flush_dup(0), s1_pc)
  XSDebug("[BP2] v=%d r=%d cr=%d fire=%d redirect=%d flush=%d pc=%x\n",
    s2_valid_dup(0), s2_ready_dup(0), s2_components_ready_dup(0), s2_fire_dup(0), s2_redirect_dup(0), s2_flush_dup(0), s2_pc)
  XSDebug("[BP3] v=%d r=%d cr=%d fire=%d redirect=%d flush=%d pc=%x\n",
    s3_valid_dup(0), s3_ready_dup(0), s3_components_ready_dup(0), s3_fire_dup(0), s3_redirect_dup(0), s3_flush_dup(0), s3_pc)
  XSDebug("[FTQ] ready=%d\n", io.bpu_to_ftq.resp.ready)
  XSDebug("resp.s1.target=%x\n", resp.s1.getTarget(0))
  XSDebug("resp.s2.target=%x\n", resp.s2.getTarget(0))
  // XSDebug("s0_ghist: %b\n", s0_ghist.predHist)
  // XSDebug("s1_ghist: %b\n", s1_ghist.predHist)
  // XSDebug("s2_ghist: %b\n", s2_ghist.predHist)
  // XSDebug("s2_predicted_ghist: %b\n", s2_predicted_ghist.predHist)
  XSDebug(p"s0_ghist_ptr: ${s0_ghist_ptr_dup(0)}\n")
  XSDebug(p"s1_ghist_ptr: ${s1_ghist_ptr_dup(0)}\n")
  XSDebug(p"s2_ghist_ptr: ${s2_ghist_ptr_dup(0)}\n")
  XSDebug(p"s3_ghist_ptr: ${s3_ghist_ptr_dup(0)}\n")

  io.ftq_to_bpu.update.bits.display(io.ftq_to_bpu.update.valid)
  io.ftq_to_bpu.redirect.bits.display(io.ftq_to_bpu.redirect.valid)

  XSPerfAccumulate("s2_redirect", s2_redirect_dup(0))
  XSPerfAccumulate("s3_redirect", s3_redirect_dup(0))
  XSPerfAccumulate("s1_not_valid", !s1_valid_dup(0))

  val perfEvents = predictors.asInstanceOf[Composer].getPerfEvents
  generatePerfEvent()
}