xref: /XiangShan/src/main/scala/xiangshan/frontend/BPU.scala (revision a0c65233389cccd2fdffe58236fb0a7dedf6d54f)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.frontend

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.experimental.chiselName
import chisel3.util._
import xiangshan._
import utils._
import utility._

import scala.math.min
import xiangshan.backend.decode.ImmUnion

trait HasBPUConst extends HasXSParameter {
  val MaxMetaLength = if (!env.FPGAPlatform) 512 else 256 // TODO: Reduce meta length
  val MaxBasicBlockSize = 32
  val LHistoryLength = 32
  // val numBr = 2
  val useBPD = true
  val useLHist = true
  val numBrSlot = numBr-1
  val totalSlot = numBrSlot + 1

  val numDup = 4

  def BP_STAGES = (0 until 3).map(_.U(2.W))
  def BP_S1 = BP_STAGES(0)
  def BP_S2 = BP_STAGES(1)
  def BP_S3 = BP_STAGES(2)

  def dup_seq[T](src: T, num: Int = numDup) = Seq.tabulate(num)(n => src)
  def dup[T <: Data](src: T, num: Int = numDup) = VecInit(Seq.tabulate(num)(n => src))
  def dup_wire[T <: Data](src: T, num: Int = numDup) = Wire(Vec(num, src.cloneType))
  def dup_idx = Seq.tabulate(numDup)(n => n.toString())
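  // Editor's note (assumption): the dup_* helpers above replicate a signal
  // numDup times so that physically distant consumers can each take a private
  // copy, easing fanout and timing closure. For example, dup(s0_fire) yields a
  // Vec(4, Bool()) whose elements are all driven by s0_fire, while
  // dup_wire(Bool()) yields an undriven Vec(4, Bool()) to be connected per copy.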
  val numBpStages = BP_STAGES.length

  val debug = true
  // TODO: Replace log2Up by log2Ceil
}

trait HasBPUParameter extends HasXSParameter with HasBPUConst {
  val BPUDebug = true && !env.FPGAPlatform && env.EnablePerfDebug
  val EnableCFICommitLog = true
  val EnableCFIPredLog = true
  val EnableBPUTimeRecord = (EnableCFICommitLog || EnableCFIPredLog) && !env.FPGAPlatform
  val EnableCommit = false
}

class BPUCtrl(implicit p: Parameters) extends XSBundle {
  val ubtb_enable = Bool()
  val btb_enable  = Bool()
  val bim_enable  = Bool()
  val tage_enable = Bool()
  val sc_enable   = Bool()
  val ras_enable  = Bool()
  val loop_enable = Bool()
}

trait BPUUtils extends HasXSParameter {
  // circular shifting
  def circularShiftLeft(source: UInt, len: Int, shamt: UInt): UInt = {
    val res = Wire(UInt(len.W))
    val higher = source << shamt
    val lower = source >> (len.U - shamt)
    res := higher | lower
    res
  }

  def circularShiftRight(source: UInt, len: Int, shamt: UInt): UInt = {
    val res = Wire(UInt(len.W))
    val higher = source << (len.U - shamt)
    val lower = source >> shamt
    res := higher | lower
    res
  }
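  // Worked example (editor's note): with len = 4, source = b0110, shamt = 1,
  // circularShiftLeft computes higher = b01100 and lower = b0110 >> 3 = b0,
  // so res = b1100 after truncation to 4 bits (the old MSB wraps to the LSB).
  // circularShiftRight computes higher = b0110 << 3 and lower = b0011,
  // so res = b0011 (the old LSB wraps to the MSB).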

  // To be verified
  def satUpdate(old: UInt, len: Int, taken: Bool): UInt = {
    val oldSatTaken = old === ((1 << len)-1).U
    val oldSatNotTaken = old === 0.U
    Mux(oldSatTaken && taken, ((1 << len)-1).U,
      Mux(oldSatNotTaken && !taken, 0.U,
        Mux(taken, old + 1.U, old - 1.U)))
  }

  def signedSatUpdate(old: SInt, len: Int, taken: Bool): SInt = {
    val oldSatTaken = old === ((1 << (len-1))-1).S
    val oldSatNotTaken = old === (-(1 << (len-1))).S
    Mux(oldSatTaken && taken, ((1 << (len-1))-1).S,
      Mux(oldSatNotTaken && !taken, (-(1 << (len-1))).S,
        Mux(taken, old + 1.S, old - 1.S)))
  }
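  // Worked example (editor's note): for a 2-bit counter (len = 2, range 0..3),
  // satUpdate(3.U, 2, true.B) stays saturated at 3, satUpdate(0.U, 2, false.B)
  // stays at 0, and every other (old, taken) pair moves one step toward the
  // corresponding extreme; signedSatUpdate behaves the same over -2..1.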

  def getFallThroughAddr(start: UInt, carry: Bool, pft: UInt) = {
    val higher = start.head(VAddrBits-log2Ceil(PredictWidth)-instOffsetBits)
    Cat(Mux(carry, higher+1.U, higher), pft, 0.U(instOffsetBits.W))
  }
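  // Editor's note (assumption): the fall-through address is rebuilt from the
  // upper bits of the block start address, the stored partial fall-through
  // offset pft, and a zeroed instruction offset; carry means the fall-through
  // crossed the region covered by pft, so the upper bits are incremented.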

  def foldTag(tag: UInt, l: Int): UInt = {
    val nChunks = (tag.getWidth + l - 1) / l
    val chunks = (0 until nChunks).map { i =>
      tag(min((i+1)*l, tag.getWidth)-1, i*l)
    }
    ParallelXOR(chunks)
  }
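  // Worked example (editor's note): foldTag splits the tag into ceil(width/l)
  // chunks of l bits and XORs them, compressing a wide tag into l bits. For an
  // 8-bit tag b10110100 with l = 3, the chunks are b100, b110 and b10, and the
  // folded tag is b100 ^ b110 ^ b010 = b000.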
}

class BasePredictorInput (implicit p: Parameters) extends XSBundle with HasBPUConst {
  def nInputs = 1

  val s0_pc = Vec(numDup, UInt(VAddrBits.W))

  val folded_hist = Vec(numDup, new AllFoldedHistories(foldedGHistInfos))
  val ghist = UInt(HistoryLength.W)

  val resp_in = Vec(nInputs, new BranchPredictionResp)

  // val final_preds = Vec(numBpStages, new)
  // val toFtq_fire = Bool()

  // val s0_all_ready = Bool()
}

class BasePredictorOutput (implicit p: Parameters) extends BranchPredictionResp {}

class BasePredictorIO (implicit p: Parameters) extends XSBundle with HasBPUConst {
  val reset_vector = Input(UInt(PAddrBits.W))
  val in  = Flipped(DecoupledIO(new BasePredictorInput)) // TODO: Remove DecoupledIO
  // val out = DecoupledIO(new BasePredictorOutput)
  val out = Output(new BasePredictorOutput)
  // val flush_out = Valid(UInt(VAddrBits.W))

  val ctrl = Input(new BPUCtrl)

  val s0_fire = Input(Vec(numDup, Bool()))
  val s1_fire = Input(Vec(numDup, Bool()))
  val s2_fire = Input(Vec(numDup, Bool()))
  val s3_fire = Input(Vec(numDup, Bool()))

  val s2_redirect = Input(Vec(numDup, Bool()))
  val s3_redirect = Input(Vec(numDup, Bool()))

  val s1_ready = Output(Bool())
  val s2_ready = Output(Bool())
  val s3_ready = Output(Bool())

  val update = Flipped(Valid(new BranchPredictionUpdate))
  val redirect = Flipped(Valid(new BranchPredictionRedirect))
}

abstract class BasePredictor(implicit p: Parameters) extends XSModule
  with HasBPUConst with BPUUtils with HasPerfEvents {
  val meta_size = 0
  val spec_meta_size = 0
  val is_fast_pred = false
  val io = IO(new BasePredictorIO())

  io.out := io.in.bits.resp_in(0)

  io.out.last_stage_meta := 0.U

  io.in.ready := !io.redirect.valid

  io.s1_ready := true.B
  io.s2_ready := true.B
  io.s3_ready := true.B

  val reset_vector = DelayN(io.reset_vector, 5)

  val s0_pc_dup   = WireInit(io.in.bits.s0_pc) // fetchIdx(io.f0_pc)
  val s1_pc_dup   = s0_pc_dup.zip(io.s0_fire).map {case (s0_pc, s0_fire) => RegEnable(s0_pc, s0_fire)}
  val s2_pc_dup   = s1_pc_dup.zip(io.s1_fire).map {case (s1_pc, s1_fire) => RegEnable(s1_pc, s1_fire)}
  val s3_pc_dup   = s2_pc_dup.zip(io.s2_fire).map {case (s2_pc, s2_fire) => RegEnable(s2_pc, s2_fire)}

  when (RegNext(RegNext(reset.asBool) && !reset.asBool)) {
    s1_pc_dup.map{case s1_pc => s1_pc := reset_vector}
  }
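  // Editor's note (assumption): RegNext(RegNext(reset.asBool) && !reset.asBool)
  // is a one-cycle pulse shortly after reset deasserts, so s1_pc is initialized
  // with the (deliberately delayed) reset vector exactly once.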

  io.out.s1.pc := s1_pc_dup
  io.out.s2.pc := s2_pc_dup
  io.out.s3.pc := s3_pc_dup

  val perfEvents: Seq[(String, UInt)] = Seq()


  def getFoldedHistoryInfo: Option[Set[FoldedHistoryInfo]] = None
}

class FakePredictor(implicit p: Parameters) extends BasePredictor {
  io.in.ready                 := true.B
  io.out.last_stage_meta      := 0.U
  io.out := io.in.bits.resp_in(0)
}

class BpuToFtqIO(implicit p: Parameters) extends XSBundle {
  val resp = DecoupledIO(new BpuToFtqBundle())
}

class PredictorIO(implicit p: Parameters) extends XSBundle {
  val bpu_to_ftq = new BpuToFtqIO()
  val ftq_to_bpu = Flipped(new FtqToBpuIO())
  val ctrl = Input(new BPUCtrl)
  val reset_vector = Input(UInt(PAddrBits.W))
}

@chiselName
class Predictor(implicit p: Parameters) extends XSModule with HasBPUConst with HasPerfEvents with HasCircularQueuePtrHelper {
  val io = IO(new PredictorIO)

  val ctrl = DelayN(io.ctrl, 1)
  val predictors = Module(if (useBPD) new Composer else new FakePredictor)

  def numOfStage = 3
  require(numOfStage > 1, "BPU numOfStage must be greater than 1")
  val topdown_stages = RegInit(VecInit(Seq.fill(numOfStage)(0.U.asTypeOf(new FrontendTopDownBundle))))
  dontTouch(topdown_stages)

  // the following can only happen on s1
  val controlRedirectBubble = Wire(Bool())
  val ControlBTBMissBubble = Wire(Bool())
  val TAGEMissBubble = Wire(Bool())
  val SCMissBubble = Wire(Bool())
  val ITTAGEMissBubble = Wire(Bool())
  val RASMissBubble = Wire(Bool())

  val memVioRedirectBubble = Wire(Bool())
  val otherRedirectBubble = Wire(Bool())
  val btbMissBubble = Wire(Bool())
  otherRedirectBubble := false.B
  memVioRedirectBubble := false.B

  // override can happen between s1-s2 and s2-s3
  val overrideBubble = Wire(Vec(numOfStage - 1, Bool()))
  def overrideStage = 1
  // ftq update block can happen on s1, s2 and s3
  val ftqUpdateBubble = Wire(Vec(numOfStage, Bool()))
  def ftqUpdateStage = 0
  // ftq full stall only happens on s3 (last stage)
  val ftqFullStall = Wire(Bool())

  // by default, no bubble event
  topdown_stages(0) := 0.U.asTypeOf(new FrontendTopDownBundle)
  // event movement driven by clock only
  for (i <- 0 until numOfStage - 1) {
    topdown_stages(i + 1) := topdown_stages(i)
  }



  // ctrl signal
  predictors.io.ctrl := ctrl
  predictors.io.reset_vector := io.reset_vector


  val reset_vector = DelayN(io.reset_vector, 5)

  val s0_fire_dup, s1_fire_dup, s2_fire_dup, s3_fire_dup = dup_wire(Bool())
  val s1_valid_dup, s2_valid_dup, s3_valid_dup = dup_seq(RegInit(false.B))
  val s1_ready_dup, s2_ready_dup, s3_ready_dup = dup_wire(Bool())
  val s1_components_ready_dup, s2_components_ready_dup, s3_components_ready_dup = dup_wire(Bool())

  val s0_pc_dup = dup(WireInit(0.U.asTypeOf(UInt(VAddrBits.W))))
  val s0_pc_reg_dup = s0_pc_dup.map(x => RegNext(x))
  when (RegNext(RegNext(reset.asBool) && !reset.asBool)) {
    s0_pc_reg_dup.map{case s0_pc => s0_pc := reset_vector}
  }
  val s1_pc = RegEnable(s0_pc_dup(0), s0_fire_dup(0))
  val s2_pc = RegEnable(s1_pc, s1_fire_dup(0))
  val s3_pc = RegEnable(s2_pc, s2_fire_dup(0))

  val s0_folded_gh_dup = dup_wire(new AllFoldedHistories(foldedGHistInfos))
  val s0_folded_gh_reg_dup = s0_folded_gh_dup.map(x => RegNext(x, init=0.U.asTypeOf(s0_folded_gh_dup(0))))
  val s1_folded_gh_dup = RegEnable(s0_folded_gh_dup, 0.U.asTypeOf(s0_folded_gh_dup), s0_fire_dup(1))
  val s2_folded_gh_dup = RegEnable(s1_folded_gh_dup, 0.U.asTypeOf(s0_folded_gh_dup), s1_fire_dup(1))
  val s3_folded_gh_dup = RegEnable(s2_folded_gh_dup, 0.U.asTypeOf(s0_folded_gh_dup), s2_fire_dup(1))

  val s0_last_br_num_oh_dup = dup_wire(UInt((numBr+1).W))
  val s0_last_br_num_oh_reg_dup = s0_last_br_num_oh_dup.map(x => RegNext(x, init=0.U))
  val s1_last_br_num_oh_dup = RegEnable(s0_last_br_num_oh_dup, 0.U.asTypeOf(s0_last_br_num_oh_dup), s0_fire_dup(1))
  val s2_last_br_num_oh_dup = RegEnable(s1_last_br_num_oh_dup, 0.U.asTypeOf(s0_last_br_num_oh_dup), s1_fire_dup(1))
  val s3_last_br_num_oh_dup = RegEnable(s2_last_br_num_oh_dup, 0.U.asTypeOf(s0_last_br_num_oh_dup), s2_fire_dup(1))

  val s0_ahead_fh_oldest_bits_dup = dup_wire(new AllAheadFoldedHistoryOldestBits(foldedGHistInfos))
  val s0_ahead_fh_oldest_bits_reg_dup = s0_ahead_fh_oldest_bits_dup.map(x => RegNext(x, init=0.U.asTypeOf(s0_ahead_fh_oldest_bits_dup(0))))
  val s1_ahead_fh_oldest_bits_dup = RegEnable(s0_ahead_fh_oldest_bits_dup, 0.U.asTypeOf(s0_ahead_fh_oldest_bits_dup), s0_fire_dup(1))
  val s2_ahead_fh_oldest_bits_dup = RegEnable(s1_ahead_fh_oldest_bits_dup, 0.U.asTypeOf(s0_ahead_fh_oldest_bits_dup), s1_fire_dup(1))
  val s3_ahead_fh_oldest_bits_dup = RegEnable(s2_ahead_fh_oldest_bits_dup, 0.U.asTypeOf(s0_ahead_fh_oldest_bits_dup), s2_fire_dup(1))

  val npcGen_dup         = Seq.tabulate(numDup)(n => new PhyPriorityMuxGenerator[UInt])
  val foldedGhGen_dup    = Seq.tabulate(numDup)(n => new PhyPriorityMuxGenerator[AllFoldedHistories])
  val ghistPtrGen_dup    = Seq.tabulate(numDup)(n => new PhyPriorityMuxGenerator[CGHPtr])
  val lastBrNumOHGen_dup = Seq.tabulate(numDup)(n => new PhyPriorityMuxGenerator[UInt])
  val aheadFhObGen_dup   = Seq.tabulate(numDup)(n => new PhyPriorityMuxGenerator[AllAheadFoldedHistoryOldestBits])

  val ghvBitWriteGens = Seq.tabulate(HistoryLength)(n => new PhyPriorityMuxGenerator[Bool])
  // val ghistGen = new PhyPriorityMuxGenerator[UInt]

  val ghv = RegInit(0.U.asTypeOf(Vec(HistoryLength, Bool())))
  val ghv_wire = WireInit(ghv)

  val s0_ghist = WireInit(0.U.asTypeOf(UInt(HistoryLength.W)))


  println(f"history buffer length ${HistoryLength}")
  val ghv_write_datas = Wire(Vec(HistoryLength, Bool()))
  val ghv_wens = Wire(Vec(HistoryLength, Bool()))

  val s0_ghist_ptr_dup = dup_wire(new CGHPtr)
  val s0_ghist_ptr_reg_dup = s0_ghist_ptr_dup.map(x => RegNext(x, init=0.U.asTypeOf(new CGHPtr)))
  val s1_ghist_ptr_dup = RegEnable(s0_ghist_ptr_dup, 0.U.asTypeOf(s0_ghist_ptr_dup), s0_fire_dup(1))
  val s2_ghist_ptr_dup = RegEnable(s1_ghist_ptr_dup, 0.U.asTypeOf(s0_ghist_ptr_dup), s1_fire_dup(1))
  val s3_ghist_ptr_dup = RegEnable(s2_ghist_ptr_dup, 0.U.asTypeOf(s0_ghist_ptr_dup), s2_fire_dup(1))

  def getHist(ptr: CGHPtr): UInt = (Cat(ghv_wire.asUInt, ghv_wire.asUInt) >> (ptr.value+1.U))(HistoryLength-1, 0)
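  // Editor's note (assumption): ghv acts as a circular buffer; concatenating
  // two copies of it and shifting right by (ptr + 1) rotates the buffer so the
  // HistoryLength bits starting just past the pointer come out in order.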
  s0_ghist := getHist(s0_ghist_ptr_dup(0))

  val resp = predictors.io.out


  val toFtq_fire = io.bpu_to_ftq.resp.valid && io.bpu_to_ftq.resp.ready

  val s1_flush_dup, s2_flush_dup, s3_flush_dup = dup_wire(Bool())
  val s2_redirect_dup, s3_redirect_dup = dup_wire(Bool())

  // predictors.io := DontCare
  predictors.io.in.valid := s0_fire_dup(0)
  predictors.io.in.bits.s0_pc := s0_pc_dup
  predictors.io.in.bits.ghist := s0_ghist
  predictors.io.in.bits.folded_hist := s0_folded_gh_dup
  predictors.io.in.bits.resp_in(0) := (0.U).asTypeOf(new BranchPredictionResp)
  // predictors.io.in.bits.resp_in(0).s1.pc := s0_pc
  // predictors.io.in.bits.toFtq_fire := toFtq_fire

  // predictors.io.out.ready := io.bpu_to_ftq.resp.ready

  val redirect_req = io.ftq_to_bpu.redirect
  val do_redirect_dup = dup_seq(RegNext(redirect_req, init=0.U.asTypeOf(io.ftq_to_bpu.redirect)))

  // Pipeline logic
  s2_redirect_dup.map(_ := false.B)
  s3_redirect_dup.map(_ := false.B)

  s3_flush_dup.map(_ := redirect_req.valid) // flush when redirect comes
  for (((s2_flush, s3_flush), s3_redirect) <- s2_flush_dup zip s3_flush_dup zip s3_redirect_dup)
    s2_flush := s3_flush || s3_redirect
  for (((s1_flush, s2_flush), s2_redirect) <- s1_flush_dup zip s2_flush_dup zip s2_redirect_dup)
    s1_flush := s2_flush || s2_redirect
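  // Editor's note (assumption): flushes cascade backwards through the pipeline:
  // an FTQ redirect flushes s3, an s3 flush or s3 override flushes s2, and an
  // s2 flush or s2 override flushes s1, so a younger stage never outlives a
  // correction made by an older one.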


  s1_components_ready_dup.map(_ := predictors.io.s1_ready)
  for (((s1_ready, s1_fire), s1_valid) <- s1_ready_dup zip s1_fire_dup zip s1_valid_dup)
    s1_ready := s1_fire || !s1_valid
  for (((s0_fire, s1_components_ready), s1_ready) <- s0_fire_dup zip s1_components_ready_dup zip s1_ready_dup)
    s0_fire := s1_components_ready && s1_ready
  predictors.io.s0_fire := s0_fire_dup

  s2_components_ready_dup.map(_ := predictors.io.s2_ready)
  for (((s2_ready, s2_fire), s2_valid) <- s2_ready_dup zip s2_fire_dup zip s2_valid_dup)
    s2_ready := s2_fire || !s2_valid
  for ((((s1_fire, s2_components_ready), s2_ready), s1_valid) <- s1_fire_dup zip s2_components_ready_dup zip s2_ready_dup zip s1_valid_dup)
    s1_fire := s1_valid && s2_components_ready && s2_ready && io.bpu_to_ftq.resp.ready

  s3_components_ready_dup.map(_ := predictors.io.s3_ready)
  for (((s3_ready, s3_fire), s3_valid) <- s3_ready_dup zip s3_fire_dup zip s3_valid_dup)
    s3_ready := s3_fire || !s3_valid
  for ((((s2_fire, s3_components_ready), s3_ready), s2_valid) <- s2_fire_dup zip s3_components_ready_dup zip s3_ready_dup zip s2_valid_dup)
    s2_fire := s2_valid && s3_components_ready && s3_ready

  for ((((s0_fire, s1_flush), s1_fire), s1_valid) <- s0_fire_dup zip s1_flush_dup zip s1_fire_dup zip s1_valid_dup) {
    when (redirect_req.valid) { s1_valid := false.B }
      .elsewhen(s0_fire)      { s1_valid := true.B  }
      .elsewhen(s1_flush)     { s1_valid := false.B }
      .elsewhen(s1_fire)      { s1_valid := false.B }
  }
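  // Editor's note (assumption): in the when-chain above, an FTQ redirect takes
  // priority over s0_fire, so an incoming s1 entry is killed in the same cycle
  // a redirect arrives; otherwise flush and fire both clear s1_valid.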
  predictors.io.s1_fire := s1_fire_dup

  s2_fire_dup := s2_valid_dup

  for (((((s1_fire, s2_flush), s2_fire), s2_valid), s1_flush) <-
    s1_fire_dup zip s2_flush_dup zip s2_fire_dup zip s2_valid_dup zip s1_flush_dup) {

    when (s2_flush)      { s2_valid := false.B   }
      .elsewhen(s1_fire) { s2_valid := !s1_flush }
      .elsewhen(s2_fire) { s2_valid := false.B   }
  }

  predictors.io.s2_fire := s2_fire_dup
  predictors.io.s2_redirect := s2_redirect_dup

  s3_fire_dup := s3_valid_dup

  for (((((s2_fire, s3_flush), s3_fire), s3_valid), s2_flush) <-
    s2_fire_dup zip s3_flush_dup zip s3_fire_dup zip s3_valid_dup zip s2_flush_dup) {

    when (s3_flush)      { s3_valid := false.B   }
      .elsewhen(s2_fire) { s3_valid := !s2_flush }
      .elsewhen(s3_fire) { s3_valid := false.B   }
  }

  predictors.io.s3_fire := s3_fire_dup
  predictors.io.s3_redirect := s3_redirect_dup


  io.bpu_to_ftq.resp.valid :=
    s1_valid_dup(2) && s2_components_ready_dup(2) && s2_ready_dup(2) ||
    s2_fire_dup(2) && s2_redirect_dup(2) ||
    s3_fire_dup(2) && s3_redirect_dup(2)
  io.bpu_to_ftq.resp.bits  := predictors.io.out
  io.bpu_to_ftq.resp.bits.last_stage_spec_info.folded_hist := s3_folded_gh_dup(2)
  io.bpu_to_ftq.resp.bits.last_stage_spec_info.histPtr     := s3_ghist_ptr_dup(2)
  io.bpu_to_ftq.resp.bits.last_stage_spec_info.lastBrNumOH := s3_last_br_num_oh_dup(2)
  io.bpu_to_ftq.resp.bits.last_stage_spec_info.afhob       := s3_ahead_fh_oldest_bits_dup(2)

  val full_pred_diff = WireInit(false.B)
  val full_pred_diff_stage = WireInit(0.U)
  val full_pred_diff_offset = WireInit(0.U)
  dontTouch(full_pred_diff)
  dontTouch(full_pred_diff_stage)
  dontTouch(full_pred_diff_offset)
  for (i <- 0 until numDup - 1) {
    when (io.bpu_to_ftq.resp.valid &&
      ((io.bpu_to_ftq.resp.bits.s1.full_pred(i).asTypeOf(UInt()) =/= io.bpu_to_ftq.resp.bits.s1.full_pred(i+1).asTypeOf(UInt()) && io.bpu_to_ftq.resp.bits.s1.full_pred(i).hit) ||
          (io.bpu_to_ftq.resp.bits.s2.full_pred(i).asTypeOf(UInt()) =/= io.bpu_to_ftq.resp.bits.s2.full_pred(i+1).asTypeOf(UInt()) && io.bpu_to_ftq.resp.bits.s2.full_pred(i).hit) ||
          (io.bpu_to_ftq.resp.bits.s3.full_pred(i).asTypeOf(UInt()) =/= io.bpu_to_ftq.resp.bits.s3.full_pred(i+1).asTypeOf(UInt()) && io.bpu_to_ftq.resp.bits.s3.full_pred(i).hit))) {
      full_pred_diff := true.B
      full_pred_diff_offset := i.U
      when (io.bpu_to_ftq.resp.bits.s1.full_pred(i).asTypeOf(UInt()) =/= io.bpu_to_ftq.resp.bits.s1.full_pred(i+1).asTypeOf(UInt())) {
        full_pred_diff_stage := 1.U
      } .elsewhen (io.bpu_to_ftq.resp.bits.s2.full_pred(i).asTypeOf(UInt()) =/= io.bpu_to_ftq.resp.bits.s2.full_pred(i+1).asTypeOf(UInt())) {
        full_pred_diff_stage := 2.U
      } .otherwise {
        full_pred_diff_stage := 3.U
      }
    }
  }
  XSError(full_pred_diff, "Full prediction difference detected!")

  npcGen_dup.zip(s0_pc_reg_dup).map{ case (gen, reg) =>
    gen.register(true.B, reg, Some("stallPC"), 0)}
  foldedGhGen_dup.zip(s0_folded_gh_reg_dup).map{ case (gen, reg) =>
    gen.register(true.B, reg, Some("stallFGH"), 0)}
  ghistPtrGen_dup.zip(s0_ghist_ptr_reg_dup).map{ case (gen, reg) =>
    gen.register(true.B, reg, Some("stallGHPtr"), 0)}
  lastBrNumOHGen_dup.zip(s0_last_br_num_oh_reg_dup).map{ case (gen, reg) =>
    gen.register(true.B, reg, Some("stallBrNumOH"), 0)}
  aheadFhObGen_dup.zip(s0_ahead_fh_oldest_bits_reg_dup).map{ case (gen, reg) =>
    gen.register(true.B, reg, Some("stallAFHOB"), 0)}

  // assign pred cycle for profiling
  io.bpu_to_ftq.resp.bits.s1.full_pred.map(_.predCycle.map(_ := GTimer()))
  io.bpu_to_ftq.resp.bits.s2.full_pred.map(_.predCycle.map(_ := GTimer()))
  io.bpu_to_ftq.resp.bits.s3.full_pred.map(_.predCycle.map(_ := GTimer()))



  // History management
  // s1
  val s1_possible_predicted_ghist_ptrs_dup = s1_ghist_ptr_dup.map(ptr => (0 to numBr).map(ptr - _.U))
  val s1_predicted_ghist_ptr_dup = s1_possible_predicted_ghist_ptrs_dup.zip(resp.s1.lastBrPosOH).map{ case (ptr, oh) => Mux1H(oh, ptr)}
  val s1_possible_predicted_fhs_dup =
    for (((((fgh, afh), br_num_oh), t), br_pos_oh) <-
      s1_folded_gh_dup zip s1_ahead_fh_oldest_bits_dup zip s1_last_br_num_oh_dup zip resp.s1.brTaken zip resp.s1.lastBrPosOH)
      yield (0 to numBr).map(i =>
        fgh.update(afh, br_num_oh, i, t & br_pos_oh(i))
      )
  val s1_predicted_fh_dup = resp.s1.lastBrPosOH.zip(s1_possible_predicted_fhs_dup).map{ case (oh, fh) => Mux1H(oh, fh)}

  val s1_ahead_fh_ob_src_dup = dup_wire(new AllAheadFoldedHistoryOldestBits(foldedGHistInfos))
  s1_ahead_fh_ob_src_dup.zip(s1_ghist_ptr_dup).map{ case (src, ptr) => src.read(ghv, ptr)}

  if (EnableGHistDiff) {
    val s1_predicted_ghist = WireInit(getHist(s1_predicted_ghist_ptr_dup(0)).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when (resp.s1.shouldShiftVec(0)(i)) {
        s1_predicted_ghist(i) := resp.s1.brTaken(0) && (i==0).B
      }
    }
    when (s1_valid_dup(0)) {
      s0_ghist := s1_predicted_ghist.asUInt
    }
  }

  val s1_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b => (s1_ghist_ptr_dup(0)).value === (CGHPtr(false.B, n.U) + b.U).value && resp.s1.shouldShiftVec(0)(b) && s1_valid_dup(0)))
  val s1_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b => (
        (s1_ghist_ptr_dup(0)).value === (CGHPtr(false.B, n.U) + b.U).value && resp.s1.shouldShiftVec(0)(b),
        resp.s1.brTaken(0) && resp.s1.lastBrPosOH(0)(b+1)
      ))
    )
  )
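  // Editor's note (assumption): s1_ghv_wens(n) fires when global-history bit n
  // is the slot addressed by s1's pointer plus some shifting branch slot b, and
  // s1_ghv_wdatas(n) is the matching taken bit (brTaken qualified by the
  // branch's position); these writes are only enabled while s1 is valid.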


  for (((npcGen, s1_valid), s1_target) <- npcGen_dup zip s1_valid_dup zip resp.s1.getTarget)
    npcGen.register(s1_valid, s1_target, Some("s1_target"), 4)
  for (((foldedGhGen, s1_valid), s1_predicted_fh) <- foldedGhGen_dup zip s1_valid_dup zip s1_predicted_fh_dup)
    foldedGhGen.register(s1_valid, s1_predicted_fh, Some("s1_FGH"), 4)
  for (((ghistPtrGen, s1_valid), s1_predicted_ghist_ptr) <- ghistPtrGen_dup zip s1_valid_dup zip s1_predicted_ghist_ptr_dup)
    ghistPtrGen.register(s1_valid, s1_predicted_ghist_ptr, Some("s1_GHPtr"), 4)
  for (((lastBrNumOHGen, s1_valid), s1_brPosOH) <- lastBrNumOHGen_dup zip s1_valid_dup zip resp.s1.lastBrPosOH.map(_.asUInt))
    lastBrNumOHGen.register(s1_valid, s1_brPosOH, Some("s1_BrNumOH"), 4)
  for (((aheadFhObGen, s1_valid), s1_ahead_fh_ob_src) <- aheadFhObGen_dup zip s1_valid_dup zip s1_ahead_fh_ob_src_dup)
    aheadFhObGen.register(s1_valid, s1_ahead_fh_ob_src, Some("s1_AFHOB"), 4)
  ghvBitWriteGens.zip(s1_ghv_wens).zipWithIndex.map{case ((b, w), i) =>
    b.register(w.reduce(_||_), s1_ghv_wdatas(i), Some(s"s1_new_bit_$i"), 4)
  }

  class PreviousPredInfo extends Bundle {
    val target = Vec(numDup, UInt(VAddrBits.W))
    val lastBrPosOH = Vec(numDup, Vec(numBr+1, Bool()))
    val taken = Vec(numDup, Bool())
    val cfiIndex = Vec(numDup, UInt(log2Ceil(PredictWidth).W))
  }

  def preds_needs_redirect_vec_dup(x: PreviousPredInfo, y: BranchPredictionBundle) = {
    val target_diff = x.target.zip(y.getTarget).map {case (t1, t2) => t1 =/= t2 }
    val lastBrPosOH_diff = x.lastBrPosOH.zip(y.lastBrPosOH).map {case (oh1, oh2) => oh1.asUInt =/= oh2.asUInt}
    val taken_diff = x.taken.zip(y.taken).map {case (t1, t2) => t1 =/= t2}
    val takenOffset_diff = x.cfiIndex.zip(y.cfiIndex).zip(x.taken).zip(y.taken).map {case (((i1, i2), xt), yt) => xt && yt && i1 =/= i2.bits}
    VecInit(
      for ((((tgtd, lbpohd), tkd), tod) <-
        target_diff zip lastBrPosOH_diff zip taken_diff zip takenOffset_diff)
        yield VecInit(tgtd, lbpohd, tkd, tod)
      // x.shouldShiftVec.asUInt =/= y.shouldShiftVec.asUInt,
      // x.brTaken =/= y.brTaken
    )
  }
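  // Editor's note (assumption): the four per-copy difference bits above flag a
  // target mismatch, a last-branch-position mismatch, a taken/not-taken
  // mismatch, and (when both predictions are taken) a taken-offset mismatch;
  // any set bit makes the later stage override the earlier prediction.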

  // s2
  val s2_possible_predicted_ghist_ptrs_dup = s2_ghist_ptr_dup.map(ptr => (0 to numBr).map(ptr - _.U))
  val s2_predicted_ghist_ptr_dup = s2_possible_predicted_ghist_ptrs_dup.zip(resp.s2.lastBrPosOH).map{ case (ptr, oh) => Mux1H(oh, ptr)}

  val s2_possible_predicted_fhs_dup =
    for ((((fgh, afh), br_num_oh), full_pred) <-
      s2_folded_gh_dup zip s2_ahead_fh_oldest_bits_dup zip s2_last_br_num_oh_dup zip resp.s2.full_pred)
      yield (0 to numBr).map(i =>
        fgh.update(afh, br_num_oh, i, if (i > 0) full_pred.br_taken_mask(i-1) else false.B)
      )
  val s2_predicted_fh_dup = resp.s2.lastBrPosOH.zip(s2_possible_predicted_fhs_dup).map{ case (oh, fh) => Mux1H(oh, fh)}

  val s2_ahead_fh_ob_src_dup = dup_wire(new AllAheadFoldedHistoryOldestBits(foldedGHistInfos))
  s2_ahead_fh_ob_src_dup.zip(s2_ghist_ptr_dup).map{ case (src, ptr) => src.read(ghv, ptr)}

  if (EnableGHistDiff) {
    val s2_predicted_ghist = WireInit(getHist(s2_predicted_ghist_ptr_dup(0)).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when (resp.s2.shouldShiftVec(0)(i)) {
        s2_predicted_ghist(i) := resp.s2.brTaken(0) && (i==0).B
      }
    }
    when(s2_redirect_dup(0)) {
      s0_ghist := s2_predicted_ghist.asUInt
    }
  }

  val s2_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b => (s2_ghist_ptr_dup(0)).value === (CGHPtr(false.B, n.U) + b.U).value && resp.s2.shouldShiftVec(0)(b) && s2_redirect_dup(0)))
  val s2_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b => (
        (s2_ghist_ptr_dup(0)).value === (CGHPtr(false.B, n.U) + b.U).value && resp.s2.shouldShiftVec(0)(b),
        resp.s2.full_pred(0).real_br_taken_mask()(b)
      ))
    )
  )

  val s1_pred_info = Wire(new PreviousPredInfo)
  s1_pred_info.target := resp.s1.getTarget
  s1_pred_info.lastBrPosOH := resp.s1.lastBrPosOH
  s1_pred_info.taken := resp.s1.taken
  s1_pred_info.cfiIndex := resp.s1.cfiIndex.map{case x => x.bits}

  val previous_s1_pred_info = RegEnable(s1_pred_info, init=0.U.asTypeOf(new PreviousPredInfo), s1_fire_dup(0))

  val s2_redirect_s1_last_pred_vec_dup = preds_needs_redirect_vec_dup(previous_s1_pred_info, resp.s2)

  for (((s2_redirect, s2_fire), s2_redirect_s1_last_pred_vec) <- s2_redirect_dup zip s2_fire_dup zip s2_redirect_s1_last_pred_vec_dup)
    s2_redirect := s2_fire && s2_redirect_s1_last_pred_vec.reduce(_||_)


  for (((npcGen, s2_redirect), s2_target) <- npcGen_dup zip s2_redirect_dup zip resp.s2.getTarget)
    npcGen.register(s2_redirect, s2_target, Some("s2_target"), 5)
  for (((foldedGhGen, s2_redirect), s2_predicted_fh) <- foldedGhGen_dup zip s2_redirect_dup zip s2_predicted_fh_dup)
    foldedGhGen.register(s2_redirect, s2_predicted_fh, Some("s2_FGH"), 5)
  for (((ghistPtrGen, s2_redirect), s2_predicted_ghist_ptr) <- ghistPtrGen_dup zip s2_redirect_dup zip s2_predicted_ghist_ptr_dup)
    ghistPtrGen.register(s2_redirect, s2_predicted_ghist_ptr, Some("s2_GHPtr"), 5)
  for (((lastBrNumOHGen, s2_redirect), s2_brPosOH) <- lastBrNumOHGen_dup zip s2_redirect_dup zip resp.s2.lastBrPosOH.map(_.asUInt))
    lastBrNumOHGen.register(s2_redirect, s2_brPosOH, Some("s2_BrNumOH"), 5)
  for (((aheadFhObGen, s2_redirect), s2_ahead_fh_ob_src) <- aheadFhObGen_dup zip s2_redirect_dup zip s2_ahead_fh_ob_src_dup)
    aheadFhObGen.register(s2_redirect, s2_ahead_fh_ob_src, Some("s2_AFHOB"), 5)
  ghvBitWriteGens.zip(s2_ghv_wens).zipWithIndex.map{case ((b, w), i) =>
    b.register(w.reduce(_||_), s2_ghv_wdatas(i), Some(s"s2_new_bit_$i"), 5)
  }

  XSPerfAccumulate("s2_redirect_because_target_diff", s2_fire_dup(0) && s2_redirect_s1_last_pred_vec_dup(0)(0))
  XSPerfAccumulate("s2_redirect_because_branch_num_diff", s2_fire_dup(0) && s2_redirect_s1_last_pred_vec_dup(0)(1))
  XSPerfAccumulate("s2_redirect_because_direction_diff", s2_fire_dup(0) && s2_redirect_s1_last_pred_vec_dup(0)(2))
  XSPerfAccumulate("s2_redirect_because_cfi_idx_diff", s2_fire_dup(0) && s2_redirect_s1_last_pred_vec_dup(0)(3))
  // XSPerfAccumulate("s2_redirect_because_shouldShiftVec_diff", s2_fire && s2_redirect_s1_last_pred_vec(4))
  // XSPerfAccumulate("s2_redirect_because_brTaken_diff", s2_fire && s2_redirect_s1_last_pred_vec(5))
  XSPerfAccumulate("s2_redirect_because_fallThroughError", s2_fire_dup(0) && resp.s2.fallThruError(0))

  XSPerfAccumulate("s2_redirect_when_taken", s2_redirect_dup(0) && resp.s2.taken(0) && resp.s2.full_pred(0).hit)
  XSPerfAccumulate("s2_redirect_when_not_taken", s2_redirect_dup(0) && !resp.s2.taken(0) && resp.s2.full_pred(0).hit)
  XSPerfAccumulate("s2_redirect_when_not_hit", s2_redirect_dup(0) && !resp.s2.full_pred(0).hit)


  // s3
  val s3_possible_predicted_ghist_ptrs_dup = s3_ghist_ptr_dup.map(ptr => (0 to numBr).map(ptr - _.U))
  val s3_predicted_ghist_ptr_dup = s3_possible_predicted_ghist_ptrs_dup.zip(resp.s3.lastBrPosOH).map{ case (ptr, oh) => Mux1H(oh, ptr)}

  val s3_possible_predicted_fhs_dup =
    for ((((fgh, afh), br_num_oh), full_pred) <-
      s3_folded_gh_dup zip s3_ahead_fh_oldest_bits_dup zip s3_last_br_num_oh_dup zip resp.s3.full_pred)
      yield (0 to numBr).map(i =>
        fgh.update(afh, br_num_oh, i, if (i > 0) full_pred.br_taken_mask(i-1) else false.B)
      )
  val s3_predicted_fh_dup = resp.s3.lastBrPosOH.zip(s3_possible_predicted_fhs_dup).map{ case (oh, fh) => Mux1H(oh, fh)}

  val s3_ahead_fh_ob_src_dup = dup_wire(new AllAheadFoldedHistoryOldestBits(foldedGHistInfos))
  s3_ahead_fh_ob_src_dup.zip(s3_ghist_ptr_dup).map{ case (src, ptr) => src.read(ghv, ptr)}

  if (EnableGHistDiff) {
    val s3_predicted_ghist = WireInit(getHist(s3_predicted_ghist_ptr_dup(0)).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when (resp.s3.shouldShiftVec(0)(i)) {
        s3_predicted_ghist(i) := resp.s3.brTaken(0) && (i==0).B
      }
    }
    when(s3_redirect_dup(0)) {
      s0_ghist := s3_predicted_ghist.asUInt
    }
  }

  val s3_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b => (s3_ghist_ptr_dup(0)).value === (CGHPtr(false.B, n.U) + b.U).value && resp.s3.shouldShiftVec(0)(b) && s3_redirect_dup(0)))
  val s3_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b => (
        (s3_ghist_ptr_dup(0)).value === (CGHPtr(false.B, n.U) + b.U).value && resp.s3.shouldShiftVec(0)(b),
        resp.s3.full_pred(0).real_br_taken_mask()(b)
      ))
    )
  )

  val previous_s2_pred = RegEnable(resp.s2, init=0.U.asTypeOf(resp.s2), s2_fire_dup(0))

  val s3_redirect_on_br_taken_dup = resp.s3.full_pred.zip(previous_s2_pred.full_pred).map {case (fp1, fp2) => fp1.real_br_taken_mask().asUInt =/= fp2.real_br_taken_mask().asUInt}
  val s3_both_first_taken_dup = resp.s3.full_pred.zip(previous_s2_pred.full_pred).map {case (fp1, fp2) => fp1.real_br_taken_mask()(0) && fp2.real_br_taken_mask()(0)}
  val s3_redirect_on_target_dup = resp.s3.getTarget.zip(previous_s2_pred.getTarget).map {case (t1, t2) => t1 =/= t2}
  val s3_redirect_on_jalr_target_dup = resp.s3.full_pred.zip(previous_s2_pred.full_pred).map {case (fp1, fp2) => fp1.hit_taken_on_jalr && fp1.jalr_target =/= fp2.jalr_target}
  val s3_redirect_on_fall_thru_error_dup = resp.s3.fallThruError

  for ((((((s3_redirect, s3_fire), s3_redirect_on_br_taken), s3_redirect_on_target), s3_redirect_on_fall_thru_error), s3_both_first_taken) <-
    s3_redirect_dup zip s3_fire_dup zip s3_redirect_on_br_taken_dup zip s3_redirect_on_target_dup zip s3_redirect_on_fall_thru_error_dup zip s3_both_first_taken_dup) {

    s3_redirect := s3_fire && (
      (s3_redirect_on_br_taken && !s3_both_first_taken) || s3_redirect_on_target || s3_redirect_on_fall_thru_error
    )
  }

  XSPerfAccumulate(f"s3_redirect_on_br_taken", s3_fire_dup(0) && s3_redirect_on_br_taken_dup(0))
  XSPerfAccumulate(f"s3_redirect_on_jalr_target", s3_fire_dup(0) && s3_redirect_on_jalr_target_dup(0))
  XSPerfAccumulate(f"s3_redirect_on_others", s3_redirect_dup(0) && !(s3_redirect_on_br_taken_dup(0) || s3_redirect_on_jalr_target_dup(0)))

  for (((npcGen, s3_redirect), s3_target) <- npcGen_dup zip s3_redirect_dup zip resp.s3.getTarget)
    npcGen.register(s3_redirect, s3_target, Some("s3_target"), 3)
  for (((foldedGhGen, s3_redirect), s3_predicted_fh) <- foldedGhGen_dup zip s3_redirect_dup zip s3_predicted_fh_dup)
    foldedGhGen.register(s3_redirect, s3_predicted_fh, Some("s3_FGH"), 3)
  for (((ghistPtrGen, s3_redirect), s3_predicted_ghist_ptr) <- ghistPtrGen_dup zip s3_redirect_dup zip s3_predicted_ghist_ptr_dup)
    ghistPtrGen.register(s3_redirect, s3_predicted_ghist_ptr, Some("s3_GHPtr"), 3)
  for (((lastBrNumOHGen, s3_redirect), s3_brPosOH) <- lastBrNumOHGen_dup zip s3_redirect_dup zip resp.s3.lastBrPosOH.map(_.asUInt))
    lastBrNumOHGen.register(s3_redirect, s3_brPosOH, Some("s3_BrNumOH"), 3)
  for (((aheadFhObGen, s3_redirect), s3_ahead_fh_ob_src) <- aheadFhObGen_dup zip s3_redirect_dup zip s3_ahead_fh_ob_src_dup)
    aheadFhObGen.register(s3_redirect, s3_ahead_fh_ob_src, Some("s3_AFHOB"), 3)
  ghvBitWriteGens.zip(s3_ghv_wens).zipWithIndex.map{case ((b, w), i) =>
    b.register(w.reduce(_||_), s3_ghv_wdatas(i), Some(s"s3_new_bit_$i"), 3)
  }

  // Send signals to tell the FTQ to override
  val s2_ftq_idx = RegEnable(io.ftq_to_bpu.enq_ptr, s1_fire_dup(0))
  val s3_ftq_idx = RegEnable(s2_ftq_idx, s2_fire_dup(0))

  for (((to_ftq_s1_valid, s1_fire), s1_flush) <- io.bpu_to_ftq.resp.bits.s1.valid zip s1_fire_dup zip s1_flush_dup) {
    to_ftq_s1_valid := s1_fire && !s1_flush
  }
  io.bpu_to_ftq.resp.bits.s1.hasRedirect.map(_ := false.B)
  io.bpu_to_ftq.resp.bits.s1.ftq_idx := DontCare
  for (((to_ftq_s2_valid, s2_fire), s2_flush) <- io.bpu_to_ftq.resp.bits.s2.valid zip s2_fire_dup zip s2_flush_dup) {
    to_ftq_s2_valid := s2_fire && !s2_flush
  }
  io.bpu_to_ftq.resp.bits.s2.hasRedirect.zip(s2_redirect_dup).map {case (hr, r) => hr := r}
  io.bpu_to_ftq.resp.bits.s2.ftq_idx := s2_ftq_idx
  for (((to_ftq_s3_valid, s3_fire), s3_flush) <- io.bpu_to_ftq.resp.bits.s3.valid zip s3_fire_dup zip s3_flush_dup) {
    to_ftq_s3_valid := s3_fire && !s3_flush
  }
  io.bpu_to_ftq.resp.bits.s3.hasRedirect.zip(s3_redirect_dup).map {case (hr, r) => hr := r}
  io.bpu_to_ftq.resp.bits.s3.ftq_idx := s3_ftq_idx

  predictors.io.update := RegNext(io.ftq_to_bpu.update)
  predictors.io.update.bits.ghist := RegNext(getHist(io.ftq_to_bpu.update.bits.spec_info.histPtr))

  val redirect_dup = do_redirect_dup.map(_.bits)
  predictors.io.redirect := do_redirect_dup(0)

  // Redirect logic
  val shift_dup = redirect_dup.map(_.cfiUpdate.shift)
  val addIntoHist_dup = redirect_dup.map(_.cfiUpdate.addIntoHist)
  // TODO: remove these below
  val shouldShiftVec_dup = shift_dup.map(shift => Mux(shift === 0.U, VecInit(0.U((1 << (log2Ceil(numBr) + 1)).W).asBools), VecInit((LowerMask(1.U << (shift-1.U))).asBools())))
  // TODO end
  val afhob_dup = redirect_dup.map(_.cfiUpdate.afhob)
  val lastBrNumOH_dup = redirect_dup.map(_.cfiUpdate.lastBrNumOH)


  val isBr_dup = redirect_dup.map(_.cfiUpdate.pd.isBr)
  val taken_dup = redirect_dup.map(_.cfiUpdate.taken)
  val real_br_taken_mask_dup =
    for (((shift, taken), addIntoHist) <- shift_dup zip taken_dup zip addIntoHist_dup)
      yield (0 until numBr).map(i => shift === (i+1).U && taken && addIntoHist )

  val oldPtr_dup = redirect_dup.map(_.cfiUpdate.histPtr)
  val oldFh_dup = redirect_dup.map(_.cfiUpdate.folded_hist)
  val updated_ptr_dup = oldPtr_dup.zip(shift_dup).map {case (oldPtr, shift) => oldPtr - shift}
  val updated_fh_dup =
    for ((((((oldFh, afhob), lastBrNumOH), taken), addIntoHist), shift) <-
      oldFh_dup zip afhob_dup zip lastBrNumOH_dup zip taken_dup zip addIntoHist_dup zip shift_dup)
    yield VecInit((0 to numBr).map(i => oldFh.update(afhob, lastBrNumOH, i, taken && addIntoHist)))(shift)
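  // Editor's note (assumption): on a redirect, history state is recovered from
  // the checkpoint in cfiUpdate: the pointer is moved by `shift` to account for
  // the re-inserted branch bits, and of the numBr+1 candidate folded-history
  // updates computed above, the one indexed by `shift` is selected.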
  val thisBrNumOH_dup = shift_dup.map(shift => UIntToOH(shift, numBr+1))
  val thisAheadFhOb_dup = dup_wire(new AllAheadFoldedHistoryOldestBits(foldedGHistInfos))
  thisAheadFhOb_dup.zip(oldPtr_dup).map {case (afhob, oldPtr) => afhob.read(ghv, oldPtr)}
  val redirect_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b => oldPtr_dup(0).value === (CGHPtr(false.B, n.U) + b.U).value && shouldShiftVec_dup(0)(b) && do_redirect_dup(0).valid))
  val redirect_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b => oldPtr_dup(0).value === (CGHPtr(false.B, n.U) + b.U).value && shouldShiftVec_dup(0)(b)),
      real_br_taken_mask_dup(0)
    )
  )

  if (EnableGHistDiff) {
    val updated_ghist = WireInit(getHist(updated_ptr_dup(0)).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when (shift_dup(0) >= (i+1).U) {
        updated_ghist(i) := taken_dup(0) && addIntoHist_dup(0) && (i==0).B
      }
    }
    when(do_redirect_dup(0).valid) {
      s0_ghist := updated_ghist.asUInt
    }
  }

  // Commit time history checker
  if (EnableCommitGHistDiff) {
    val commitGHist = RegInit(0.U.asTypeOf(Vec(HistoryLength, Bool())))
    val commitGHistPtr = RegInit(0.U.asTypeOf(new CGHPtr))
    def getCommitHist(ptr: CGHPtr): UInt =
      (Cat(commitGHist.asUInt, commitGHist.asUInt) >> (ptr.value+1.U))(HistoryLength-1, 0)

    val updateValid        : Bool      = io.ftq_to_bpu.update.valid
    val branchValidMask    : UInt      = io.ftq_to_bpu.update.bits.ftb_entry.brValids.asUInt
    val branchCommittedMask: Vec[Bool] = io.ftq_to_bpu.update.bits.br_committed
    val misPredictMask     : UInt      = io.ftq_to_bpu.update.bits.mispred_mask.asUInt
    val takenMask          : UInt      =
      io.ftq_to_bpu.update.bits.br_taken_mask.asUInt |
        io.ftq_to_bpu.update.bits.ftb_entry.always_taken.asUInt // Always-taken branches are recorded in the history
    val takenIdx       : UInt = (PriorityEncoder(takenMask) + 1.U((log2Ceil(numBr)+1).W)).asUInt
    val misPredictIdx  : UInt = (PriorityEncoder(misPredictMask) + 1.U((log2Ceil(numBr)+1).W)).asUInt
    val shouldShiftMask: UInt = Mux(takenMask.orR,
        LowerMask(takenIdx).asUInt,
        ((1 << numBr) - 1).asUInt) &
      Mux(misPredictMask.orR,
        LowerMask(misPredictIdx).asUInt,
        ((1 << numBr) - 1).asUInt) &
      branchCommittedMask.asUInt
    val updateShift    : UInt   =
      Mux(updateValid && branchValidMask.orR, PopCount(branchValidMask & shouldShiftMask), 0.U)
    dontTouch(updateShift)
    dontTouch(commitGHist)
    dontTouch(commitGHistPtr)
    dontTouch(takenMask)
    dontTouch(branchValidMask)
    dontTouch(branchCommittedMask)

    // Maintain the commitGHist
    for (i <- 0 until numBr) {
      when(updateShift >= (i + 1).U) {
        val ptr: CGHPtr = commitGHistPtr - i.asUInt
        commitGHist(ptr.value) := takenMask(i)
      }
    }
    when(updateValid) {
      commitGHistPtr := commitGHistPtr - updateShift
    }

    // Calculate true history using Parallel XOR
    def computeFoldedHist(hist: UInt, compLen: Int)(histLen: Int): UInt = {
      if (histLen > 0) {
        val nChunks     = (histLen + compLen - 1) / compLen
        val hist_chunks = (0 until nChunks) map { i =>
          hist(min((i + 1) * compLen, histLen) - 1, i * compLen)
        }
        ParallelXOR(hist_chunks)
      }
      else 0.U
    }
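    // Worked example (editor's note): computeFoldedHist(hist, 8)(20) splits the
    // low 20 history bits into hist(7,0), hist(15,8) and hist(19,16) and XORs
    // them, matching how a 20-bit history folds down to index a table with
    // 2^8 rows per branch.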
    // Do the differential check
    val predictFHistAll: AllFoldedHistories = io.ftq_to_bpu.update.bits.spec_info.folded_hist
    TageTableInfos.map {
      case (nRows, histLen, _) => {
        val nRowsPerBr = nRows / numBr
        val commitTrueHist: UInt = computeFoldedHist(getCommitHist(commitGHistPtr), log2Ceil(nRowsPerBr))(histLen)
        val predictFHist         : UInt = predictFHistAll.
          getHistWithInfo((histLen, min(histLen, log2Ceil(nRowsPerBr)))).folded_hist
        XSWarn(updateValid && predictFHist =/= commitTrueHist,
          p"predict time ghist: ${predictFHist} is different from commit time: ${commitTrueHist}\n")
      }
    }
  }


  // val updatedGh = oldGh.update(shift, taken && addIntoHist)
  for ((npcGen, do_redirect) <- npcGen_dup zip do_redirect_dup)
    npcGen.register(do_redirect.valid, do_redirect.bits.cfiUpdate.target, Some("redirect_target"), 2)
  for (((foldedGhGen, do_redirect), updated_fh) <- foldedGhGen_dup zip do_redirect_dup zip updated_fh_dup)
    foldedGhGen.register(do_redirect.valid, updated_fh, Some("redirect_FGHT"), 2)
  for (((ghistPtrGen, do_redirect), updated_ptr) <- ghistPtrGen_dup zip do_redirect_dup zip updated_ptr_dup)
    ghistPtrGen.register(do_redirect.valid, updated_ptr, Some("redirect_GHPtr"), 2)
  for (((lastBrNumOHGen, do_redirect), thisBrNumOH) <- lastBrNumOHGen_dup zip do_redirect_dup zip thisBrNumOH_dup)
    lastBrNumOHGen.register(do_redirect.valid, thisBrNumOH, Some("redirect_BrNumOH"), 2)
  for (((aheadFhObGen, do_redirect), thisAheadFhOb) <- aheadFhObGen_dup zip do_redirect_dup zip thisAheadFhOb_dup)
    aheadFhObGen.register(do_redirect.valid, thisAheadFhOb, Some("redirect_AFHOB"), 2)
  ghvBitWriteGens.zip(redirect_ghv_wens).zipWithIndex.map{case ((b, w), i) =>
    b.register(w.reduce(_||_), redirect_ghv_wdatas(i), Some(s"redirect_new_bit_$i"), 2)
  }
  // no need to assign s0_last_pred

  // val need_reset = RegNext(reset.asBool) && !reset.asBool

  // Reset
  // npcGen.register(need_reset, resetVector.U, Some("reset_pc"), 1)
  // foldedGhGen.register(need_reset, 0.U.asTypeOf(s0_folded_gh), Some("reset_FGH"), 1)
  // ghistPtrGen.register(need_reset, 0.U.asTypeOf(new CGHPtr), Some("reset_GHPtr"), 1)

  s0_pc_dup.zip(npcGen_dup).map {case (s0_pc, npcGen) => s0_pc := npcGen()}
  s0_folded_gh_dup.zip(foldedGhGen_dup).map {case (s0_folded_gh, foldedGhGen) => s0_folded_gh := foldedGhGen()}
  s0_ghist_ptr_dup.zip(ghistPtrGen_dup).map {case (s0_ghist_ptr, ghistPtrGen) => s0_ghist_ptr := ghistPtrGen()}
  s0_ahead_fh_oldest_bits_dup.zip(aheadFhObGen_dup).map {case (s0_ahead_fh_oldest_bits, aheadFhObGen) =>
    s0_ahead_fh_oldest_bits := aheadFhObGen()}
  s0_last_br_num_oh_dup.zip(lastBrNumOHGen_dup).map {case (s0_last_br_num_oh, lastBrNumOHGen) =>
    s0_last_br_num_oh := lastBrNumOHGen()}
  (ghv_write_datas zip ghvBitWriteGens).map{case (wd, d) => wd := d()}
  for (i <- 0 until HistoryLength) {
    ghv_wens(i) := Seq(s1_ghv_wens, s2_ghv_wens, s3_ghv_wens, redirect_ghv_wens).map(_(i).reduce(_||_)).reduce(_||_)
    when (ghv_wens(i)) {
      ghv(i) := ghv_write_datas(i)
    }
  }

  // TODO: signals for memVio and other Redirects
  controlRedirectBubble := do_redirect_dup(0).valid && do_redirect_dup(0).bits.ControlRedirectBubble
  ControlBTBMissBubble := do_redirect_dup(0).bits.ControlBTBMissBubble
  TAGEMissBubble := do_redirect_dup(0).bits.TAGEMissBubble
  SCMissBubble := do_redirect_dup(0).bits.SCMissBubble
  ITTAGEMissBubble := do_redirect_dup(0).bits.ITTAGEMissBubble
  RASMissBubble := do_redirect_dup(0).bits.RASMissBubble

  memVioRedirectBubble := do_redirect_dup(0).valid && do_redirect_dup(0).bits.MemVioRedirectBubble
  otherRedirectBubble := do_redirect_dup(0).valid && do_redirect_dup(0).bits.OtherRedirectBubble
  btbMissBubble := do_redirect_dup(0).valid && do_redirect_dup(0).bits.BTBMissBubble
  overrideBubble(0) := s2_redirect_dup(0)
  overrideBubble(1) := s3_redirect_dup(0)
  ftqUpdateBubble(0) := !s1_components_ready_dup(0)
  ftqUpdateBubble(1) := !s2_components_ready_dup(0)
  ftqUpdateBubble(2) := !s3_components_ready_dup(0)
  ftqFullStall := !io.bpu_to_ftq.resp.ready
  io.bpu_to_ftq.resp.bits.topdown_info := topdown_stages(numOfStage - 1)

  // topdown handling logic here
  when (controlRedirectBubble) {
    /*
    for (i <- 0 until numOfStage)
      topdown_stages(i).reasons(TopDownCounters.ControlRedirectBubble.id) := true.B
    io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.ControlRedirectBubble.id) := true.B
    */
    when (ControlBTBMissBubble) {
      for (i <- 0 until numOfStage)
        topdown_stages(i).reasons(TopDownCounters.BTBMissBubble.id) := true.B
      io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.BTBMissBubble.id) := true.B
    } .elsewhen (TAGEMissBubble) {
      for (i <- 0 until numOfStage)
        topdown_stages(i).reasons(TopDownCounters.TAGEMissBubble.id) := true.B
      io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.TAGEMissBubble.id) := true.B
    } .elsewhen (SCMissBubble) {
      for (i <- 0 until numOfStage)
        topdown_stages(i).reasons(TopDownCounters.SCMissBubble.id) := true.B
      io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.SCMissBubble.id) := true.B
    } .elsewhen (ITTAGEMissBubble) {
      for (i <- 0 until numOfStage)
        topdown_stages(i).reasons(TopDownCounters.ITTAGEMissBubble.id) := true.B
      io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.ITTAGEMissBubble.id) := true.B
    } .elsewhen (RASMissBubble) {
      for (i <- 0 until numOfStage)
        topdown_stages(i).reasons(TopDownCounters.RASMissBubble.id) := true.B
      io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.RASMissBubble.id) := true.B
    }
  }
  when (memVioRedirectBubble) {
    for (i <- 0 until numOfStage)
      topdown_stages(i).reasons(TopDownCounters.MemVioRedirectBubble.id) := true.B
    io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.MemVioRedirectBubble.id) := true.B
  }
  when (otherRedirectBubble) {
    for (i <- 0 until numOfStage)
      topdown_stages(i).reasons(TopDownCounters.OtherRedirectBubble.id) := true.B
    io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.OtherRedirectBubble.id) := true.B
  }
  when (btbMissBubble) {
    for (i <- 0 until numOfStage)
      topdown_stages(i).reasons(TopDownCounters.BTBMissBubble.id) := true.B
    io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.BTBMissBubble.id) := true.B
  }

  for (i <- 0 until numOfStage) {
    if (i < numOfStage - overrideStage) {
      when (overrideBubble(i)) {
        for (j <- 0 to i)
          topdown_stages(j).reasons(TopDownCounters.OverrideBubble.id) := true.B
      }
    }
    if (i < numOfStage - ftqUpdateStage) {
      when (ftqUpdateBubble(i)) {
        topdown_stages(i).reasons(TopDownCounters.FtqUpdateBubble.id) := true.B
      }
    }
  }
  when (ftqFullStall) {
    topdown_stages(0).reasons(TopDownCounters.FtqFullStall.id) := true.B
  }

  XSError(isBefore(redirect_dup(0).cfiUpdate.histPtr, s3_ghist_ptr_dup(0)) && do_redirect_dup(0).valid,
    p"s3_ghist_ptr ${s3_ghist_ptr_dup(0)} exceeds redirect histPtr ${redirect_dup(0).cfiUpdate.histPtr}\n")
  XSError(isBefore(redirect_dup(0).cfiUpdate.histPtr, s2_ghist_ptr_dup(0)) && do_redirect_dup(0).valid,
    p"s2_ghist_ptr ${s2_ghist_ptr_dup(0)} exceeds redirect histPtr ${redirect_dup(0).cfiUpdate.histPtr}\n")
  XSError(isBefore(redirect_dup(0).cfiUpdate.histPtr, s1_ghist_ptr_dup(0)) && do_redirect_dup(0).valid,
    p"s1_ghist_ptr ${s1_ghist_ptr_dup(0)} exceeds redirect histPtr ${redirect_dup(0).cfiUpdate.histPtr}\n")

  XSDebug(RegNext(reset.asBool) && !reset.asBool, "Resetting...\n")
  XSDebug(io.ftq_to_bpu.update.valid, p"Update from ftq\n")
  XSDebug(io.ftq_to_bpu.redirect.valid, p"Redirect from ftq\n")

  XSDebug("[BP0]                 fire=%d                      pc=%x\n", s0_fire_dup(0), s0_pc_dup(0))
  XSDebug("[BP1] v=%d r=%d cr=%d fire=%d             flush=%d pc=%x\n",
    s1_valid_dup(0), s1_ready_dup(0), s1_components_ready_dup(0), s1_fire_dup(0), s1_flush_dup(0), s1_pc)
  XSDebug("[BP2] v=%d r=%d cr=%d fire=%d redirect=%d flush=%d pc=%x\n",
    s2_valid_dup(0), s2_ready_dup(0), s2_components_ready_dup(0), s2_fire_dup(0), s2_redirect_dup(0), s2_flush_dup(0), s2_pc)
  XSDebug("[BP3] v=%d r=%d cr=%d fire=%d redirect=%d flush=%d pc=%x\n",
    s3_valid_dup(0), s3_ready_dup(0), s3_components_ready_dup(0), s3_fire_dup(0), s3_redirect_dup(0), s3_flush_dup(0), s3_pc)
  XSDebug("[FTQ] ready=%d\n", io.bpu_to_ftq.resp.ready)
  XSDebug("resp.s1.target=%x\n", resp.s1.getTarget(0))
  XSDebug("resp.s2.target=%x\n", resp.s2.getTarget(0))
  // XSDebug("s0_ghist: %b\n", s0_ghist.predHist)
  // XSDebug("s1_ghist: %b\n", s1_ghist.predHist)
  // XSDebug("s2_ghist: %b\n", s2_ghist.predHist)
  // XSDebug("s2_predicted_ghist: %b\n", s2_predicted_ghist.predHist)
  XSDebug(p"s0_ghist_ptr: ${s0_ghist_ptr_dup(0)}\n")
  XSDebug(p"s1_ghist_ptr: ${s1_ghist_ptr_dup(0)}\n")
  XSDebug(p"s2_ghist_ptr: ${s2_ghist_ptr_dup(0)}\n")
  XSDebug(p"s3_ghist_ptr: ${s3_ghist_ptr_dup(0)}\n")

  io.ftq_to_bpu.update.bits.display(io.ftq_to_bpu.update.valid)
  io.ftq_to_bpu.redirect.bits.display(io.ftq_to_bpu.redirect.valid)


  XSPerfAccumulate("s2_redirect", s2_redirect_dup(0))
  XSPerfAccumulate("s3_redirect", s3_redirect_dup(0))
  XSPerfAccumulate("s1_not_valid", !s1_valid_dup(0))

  val perfEvents = predictors.asInstanceOf[Composer].getPerfEvents
  generatePerfEvent()
}