xref: /XiangShan/src/main/scala/xiangshan/frontend/BPU.scala (revision b37e4b45da2333608f12413931aecdaef46443e4)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.frontend

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.experimental.chiselName
import chisel3.util._
import xiangshan._
import utils._

import scala.math.min

trait HasBPUConst extends HasXSParameter {
  val MaxMetaLength = 512 // TODO: Reduce meta length
  val MaxBasicBlockSize = 32
  val LHistoryLength = 32
  // val numBr = 2
  val useBPD = true
  val useLHist = true
  val numBrSlot = numBr-1
  val totalSlot = numBrSlot + 1

  def BP_STAGES = (0 until 2).map(_.U(2.W))
  def BP_S1 = BP_STAGES(0)
  def BP_S2 = BP_STAGES(1)
  // def BP_S3 = BP_STAGES(2)
  val numBpStages = BP_STAGES.length

  val debug = true
  val resetVector = 0x10000000L
  // TODO: Replace log2Up by log2Ceil
}

trait HasBPUParameter extends HasXSParameter with HasBPUConst {
  val BPUDebug = true && !env.FPGAPlatform && env.EnablePerfDebug
  val EnableCFICommitLog = true
  val EnbaleCFIPredLog = true
  val EnableBPUTimeRecord = (EnableCFICommitLog || EnbaleCFIPredLog) && !env.FPGAPlatform
  val EnableCommit = false
}

class BPUCtrl(implicit p: Parameters) extends XSBundle {
  val ubtb_enable = Bool()
  val btb_enable  = Bool()
  val bim_enable  = Bool()
  val tage_enable = Bool()
  val sc_enable   = Bool()
  val ras_enable  = Bool()
  val loop_enable = Bool()
}

trait BPUUtils extends HasXSParameter {
  // circular shifting
  def circularShiftLeft(source: UInt, len: Int, shamt: UInt): UInt = {
    val res = Wire(UInt(len.W))
    val higher = source << shamt
    val lower = source >> (len.U - shamt)
    res := higher | lower
    res
  }

  def circularShiftRight(source: UInt, len: Int, shamt: UInt): UInt = {
    val res = Wire(UInt(len.W))
    val higher = source << (len.U - shamt)
    val lower = source >> shamt
    res := higher | lower
    res
  }
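
  // Worked example (illustrative only, assuming len = 8): circularShiftLeft of
  // 1000_0001 by 2 gives 0000_0110 -- the two bits shifted out at the top wrap
  // around into the low end, i.e. a rotate-left by shamt; circularShiftRight
  // performs the inverse rotation.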

  // To be verified
  def satUpdate(old: UInt, len: Int, taken: Bool): UInt = {
    val oldSatTaken = old === ((1 << len)-1).U
    val oldSatNotTaken = old === 0.U
    Mux(oldSatTaken && taken, ((1 << len)-1).U,
      Mux(oldSatNotTaken && !taken, 0.U,
        Mux(taken, old + 1.U, old - 1.U)))
  }

  def signedSatUpdate(old: SInt, len: Int, taken: Bool): SInt = {
    val oldSatTaken = old === ((1 << (len-1))-1).S
    val oldSatNotTaken = old === (-(1 << (len-1))).S
    Mux(oldSatTaken && taken, ((1 << (len-1))-1).S,
      Mux(oldSatNotTaken && !taken, (-(1 << (len-1))).S,
        Mux(taken, old + 1.S, old - 1.S)))
  }
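
  // Worked example (illustrative only): with len = 2 the unsigned counter
  // saturates at 0 and 3, so satUpdate walks 0 -> 1 -> 2 -> 3 on taken and back
  // down on not-taken, staying at 3 (resp. 0) once saturated. The signed
  // variant saturates at the two's-complement extremes, e.g. -2 and +1 for
  // len = 2.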

  def getFallThroughAddr(start: UInt, carry: Bool, pft: UInt) = {
    val higher = start.head(VAddrBits-log2Ceil(PredictWidth)-instOffsetBits-1)
    Cat(Mux(carry, higher+1.U, higher), pft, 0.U(instOffsetBits.W))
  }
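
  // The fall-through address is rebuilt from three pieces: the upper bits of
  // the start PC (incremented when the partial fall-through offset wrapped,
  // i.e. carry is set), the stored partial offset pft, and instOffsetBits of
  // zeros. Illustrative example assuming PredictWidth = 16 and
  // instOffsetBits = 1: start = 0x80000000, carry = false, pft = 0x10
  // reconstructs 0x80000020.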

  def foldTag(tag: UInt, l: Int): UInt = {
    val nChunks = (tag.getWidth + l - 1) / l
    val chunks = (0 until nChunks).map { i =>
      tag(min((i+1)*l, tag.getWidth)-1, i*l)
    }
    ParallelXOR(chunks)
  }
}
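
// A minimal software-only sketch of the XOR folding performed by foldTag
// above: the tag is cut into l-bit chunks and the chunks are XORed together,
// so every bit of a wide tag still influences the l-bit result. The object
// name and the use of BigInt are illustrative assumptions for a plain-Scala
// model; this is not part of the hardware design.
object FoldTagModel {
  def foldTag(tag: BigInt, tagWidth: Int, l: Int): BigInt = {
    val nChunks = (tagWidth + l - 1) / l
    val mask = (BigInt(1) << l) - 1
    // XOR the l-bit chunks, least-significant chunk first
    (0 until nChunks).map(i => (tag >> (i * l)) & mask).foldLeft(BigInt(0))(_ ^ _)
  }

  def main(args: Array[String]): Unit = {
    // 20-bit tag 0x23456 folded to 8 bits: 0x56 ^ 0x34 ^ 0x02 = 0x60
    println(foldTag(BigInt("23456", 16), 20, 8).toString(16))
  }
}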

// class BranchPredictionUpdate(implicit p: Parameters) extends XSBundle with HasBPUConst {
//   val pc = UInt(VAddrBits.W)
//   val br_offset = Vec(num_br, UInt(log2Up(MaxBasicBlockSize).W))
//   val br_mask = Vec(MaxBasicBlockSize, Bool())
//
//   val jmp_valid = Bool()
//   val jmp_type = UInt(3.W)
//
//   val is_NextMask = Vec(FetchWidth*2, Bool())
//
//   val cfi_idx = Valid(UInt(log2Ceil(MaxBasicBlockSize).W))
//   val cfi_mispredict = Bool()
//   val cfi_is_br = Bool()
//   val cfi_is_jal = Bool()
//   val cfi_is_jalr = Bool()
//
//   val ghist = new ShiftingGlobalHistory()
//
//   val target = UInt(VAddrBits.W)
//
//   val meta = UInt(MaxMetaLength.W)
//   val spec_meta = UInt(MaxMetaLength.W)
//
//   def taken = cfi_idx.valid
// }

class AllFoldedHistories(val gen: Seq[Tuple2[Int, Int]])(implicit p: Parameters) extends XSBundle with HasBPUConst {
  val hist = MixedVec(gen.map{case (l, cl) => new FoldedHistory(l, cl, numBr)})
  // println(gen.mkString)
  require(gen.toSet.toList.equals(gen))
  def getHistWithInfo(info: Tuple2[Int, Int]) = {
    val selected = hist.filter(_.info.equals(info))
    require(selected.length == 1)
    selected(0)
  }
  def autoConnectFrom(that: AllFoldedHistories) = {
    require(this.hist.length <= that.hist.length)
    for (h <- this.hist) {
      h := that.getHistWithInfo(h.info)
    }
  }
  def update(ghv: Vec[Bool], ptr: CGHPtr, shift: Int, taken: Bool): AllFoldedHistories = {
    val res = WireInit(this)
    for (i <- 0 until this.hist.length) {
      res.hist(i) := this.hist(i).update(ghv, ptr, shift, taken)
    }
    res
  }

  def display(cond: Bool) = {
    for (h <- hist) {
      XSDebug(cond, p"hist len ${h.len}, folded len ${h.compLen}, value ${Binary(h.folded_hist)}\n")
    }
  }
}
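
// A software-only sketch of what a folded history represents: the most recent
// `len` branch outcomes compressed to `compLen` bits by XORing consecutive
// compLen-bit chunks. The hardware FoldedHistory maintains this incrementally
// from the global history vector; the model below simply recomputes the fold
// from a full outcome sequence, which is easier to follow but equivalent in
// spirit. Names, bit ordering and the recompute-on-update strategy are
// illustrative assumptions, not the actual implementation.
object FoldedHistoryModel {
  // hist holds outcomes newest-first; true means taken
  def fold(hist: Seq[Boolean], len: Int, compLen: Int): BigInt = {
    val bits = hist.take(len).padTo(len, false)
    bits.grouped(compLen).map { chunk =>
      chunk.zipWithIndex.foldLeft(BigInt(0)) { case (acc, (taken, i)) =>
        if (taken) acc | (BigInt(1) << i) else acc
      }
    }.foldLeft(BigInt(0))(_ ^ _)
  }

  def main(args: Array[String]): Unit = {
    val hist = Seq(true, false, true, true, false, false, true, false)
    println(fold(hist, 8, 3).toString(2))          // fold of the last 8 outcomes
    println(fold(false +: hist, 8, 3).toString(2)) // after shifting in a not-taken outcome
  }
}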

class BasePredictorInput (implicit p: Parameters) extends XSBundle with HasBPUConst {
  def nInputs = 1

  val s0_pc = UInt(VAddrBits.W)

  val folded_hist = new AllFoldedHistories(foldedGHistInfos)
  val ghr = UInt(UbtbGHRLength.W)

  val resp_in = Vec(nInputs, new BranchPredictionResp)

  // val final_preds = Vec(numBpStages, new)
  // val toFtq_fire = Bool()

  // val s0_all_ready = Bool()
}

class BasePredictorOutput (implicit p: Parameters) extends XSBundle with HasBPUConst {
  val last_stage_meta = UInt(MaxMetaLength.W) // This is used by the composer
  val resp = new BranchPredictionResp

  // These are stored in meta and extracted in the composer
  // val rasSp = UInt(log2Ceil(RasSize).W)
  // val rasTop = new RASEntry
  // val specCnt = Vec(PredictWidth, UInt(10.W))
}

class BasePredictorIO (implicit p: Parameters) extends XSBundle with HasBPUConst {
  val in  = Flipped(DecoupledIO(new BasePredictorInput)) // TODO: Remove DecoupledIO
  // val out = DecoupledIO(new BasePredictorOutput)
  val out = Output(new BasePredictorOutput)
  // val flush_out = Valid(UInt(VAddrBits.W))

  // val ctrl = Input(new BPUCtrl())

  val s0_fire = Input(Bool())
  val s1_fire = Input(Bool())
  val s2_fire = Input(Bool())

  val s1_ready = Output(Bool())
  val s2_ready = Output(Bool())

  val update = Flipped(Valid(new BranchPredictionUpdate))
  val redirect = Flipped(Valid(new BranchPredictionRedirect))
}

abstract class BasePredictor(implicit p: Parameters) extends XSModule with HasBPUConst with BPUUtils {
  val meta_size = 0
  val spec_meta_size = 0
  val io = IO(new BasePredictorIO())

  io.out.resp := io.in.bits.resp_in(0)

  io.out.last_stage_meta := 0.U

  io.in.ready := !io.redirect.valid

  io.s1_ready := true.B
  io.s2_ready := true.B

  val s0_pc       = WireInit(io.in.bits.s0_pc) // fetchIdx(io.f0_pc)
  val s1_pc       = RegEnable(s0_pc, resetVector.U, io.s0_fire)
  val s2_pc       = RegEnable(s1_pc, io.s1_fire)

  io.out.resp.s1.pc := s1_pc
  io.out.resp.s2.pc := s2_pc


  def getFoldedHistoryInfo: Option[Set[FoldedHistoryInfo]] = None
}

class FakePredictor(implicit p: Parameters) extends BasePredictor {
  io.in.ready                 := true.B
  io.out.last_stage_meta              := 0.U
  io.out.resp := io.in.bits.resp_in(0)
}

class BpuToFtqIO(implicit p: Parameters) extends XSBundle {
  val resp = DecoupledIO(new BpuToFtqBundle())
}

class PredictorIO(implicit p: Parameters) extends XSBundle {
  val bpu_to_ftq = new BpuToFtqIO()
  val ftq_to_bpu = Flipped(new FtqToBpuIO())
}

@chiselName
class Predictor(implicit p: Parameters) extends XSModule with HasBPUConst with HasPerfEvents {
  val io = IO(new PredictorIO)

  val predictors = Module(if (useBPD) new Composer else new FakePredictor)

  val folded_hist_infos = predictors.getFoldedHistoryInfo.getOrElse(Set()).toList
  for ((len, compLen) <- folded_hist_infos) {
    println(f"folded hist info: len $len, compLen $compLen")
  }

  val s0_fire, s1_fire, s2_fire = Wire(Bool())
  val s1_valid, s2_valid = RegInit(false.B)
  val s1_ready, s2_ready = Wire(Bool())
  val s1_components_ready, s2_components_ready = Wire(Bool())

  val s0_pc = WireInit(resetVector.U)
  val s0_pc_reg = RegNext(s0_pc, init=resetVector.U)
  val s1_pc = RegEnable(s0_pc, s0_fire)
  val s2_pc = RegEnable(s1_pc, s1_fire)

  val s0_folded_gh = Wire(new AllFoldedHistories(foldedGHistInfos))
  val s0_folded_gh_reg = RegNext(s0_folded_gh, init=0.U.asTypeOf(s0_folded_gh))
  val s1_folded_gh = RegEnable(s0_folded_gh, 0.U.asTypeOf(s0_folded_gh), s0_fire)
  val s2_folded_gh = RegEnable(s1_folded_gh, 0.U.asTypeOf(s0_folded_gh), s1_fire)

  val npcGen   = new PhyPriorityMuxGenerator[UInt]
  val foldedGhGen = new PhyPriorityMuxGenerator[AllFoldedHistories]
  val ghistPtrGen = new PhyPriorityMuxGenerator[CGHPtr]
  val ghvBitWriteGens = Seq.tabulate(HistoryLength)(n => new PhyPriorityMuxGenerator[Bool])
  val ghrGen = new PhyPriorityMuxGenerator[UInt]

  val ghv = RegInit(0.U.asTypeOf(Vec(HistoryLength, Bool())))
  val ghv_wire = WireInit(ghv)

  val ghv_write_datas = Wire(Vec(HistoryLength, Bool()))
  val ghv_wens = Wire(Vec(HistoryLength, Bool()))

  val s0_ghist_ptr = Wire(new CGHPtr)
  val s0_ghist_ptr_reg = RegNext(s0_ghist_ptr, init=0.U.asTypeOf(new CGHPtr))
  val s1_ghist_ptr = RegEnable(s0_ghist_ptr, 0.U.asTypeOf(new CGHPtr), s0_fire)
  val s2_ghist_ptr = RegEnable(s1_ghist_ptr, 0.U.asTypeOf(new CGHPtr), s1_fire)

  val s0_ghr = Wire(UInt(UbtbGHRLength.W))
  val s0_ghr_reg = RegNext(s0_ghr, init=0.U(UbtbGHRLength.W))
  val s1_ghr = RegEnable(s0_ghr, 0.U(UbtbGHRLength.W), s0_fire)
  val s2_ghr = RegEnable(s1_ghr, 0.U(UbtbGHRLength.W), s1_fire)

  val resp = predictors.io.out.resp


  val toFtq_fire = io.bpu_to_ftq.resp.valid && io.bpu_to_ftq.resp.ready

  val s1_flush, s2_flush = Wire(Bool())
  val s2_redirect = Wire(Bool())

  // predictors.io := DontCare
  predictors.io.in.valid := s0_fire
  predictors.io.in.bits.s0_pc := s0_pc
  predictors.io.in.bits.folded_hist := s0_folded_gh
  predictors.io.in.bits.ghr := s0_ghr
  predictors.io.in.bits.resp_in(0) := (0.U).asTypeOf(new BranchPredictionResp)
  // predictors.io.in.bits.resp_in(0).s1.pc := s0_pc
  // predictors.io.in.bits.toFtq_fire := toFtq_fire

  // predictors.io.out.ready := io.bpu_to_ftq.resp.ready

  // Pipeline logic
  s2_redirect := false.B

  s2_flush := io.ftq_to_bpu.redirect.valid
  s1_flush := s2_flush || s2_redirect

  s1_components_ready := predictors.io.s1_ready
  s1_ready := s1_fire || !s1_valid
  s0_fire := !reset.asBool && s1_components_ready && s1_ready
  predictors.io.s0_fire := s0_fire

  s2_components_ready := predictors.io.s2_ready
  s2_ready := s2_fire || !s2_valid
  s1_fire := s1_valid && s2_components_ready && s2_ready && io.bpu_to_ftq.resp.ready

  when(s0_fire)         { s1_valid := true.B  }
    .elsewhen(s1_flush) { s1_valid := false.B }
    .elsewhen(s1_fire)  { s1_valid := false.B }

  predictors.io.s1_fire := s1_fire

  s2_fire := s2_valid

  when(s2_flush)       { s2_valid := false.B }
    .elsewhen(s1_fire) { s2_valid := !s1_flush  }
    .elsewhen(s2_fire) { s2_valid := false.B }

  predictors.io.s2_fire := s2_fire
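
  // Pipeline handshake summary: s0 issues a request whenever s1 and all
  // sub-predictors can accept one (s1_components_ready && s1_ready); s1 fires
  // only when s2 and the FTQ can both accept its result; s2 fires whenever it
  // holds a valid entry. Each stage's valid bit is set by the previous stage's
  // fire and cleared by its own fire or by a flush (s2 override or FTQ
  // redirect).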


  io.bpu_to_ftq.resp.valid :=
    s1_valid && s2_components_ready && s2_ready ||
    s2_fire && s2_redirect
  io.bpu_to_ftq.resp.bits  := BpuToFtqBundle(predictors.io.out.resp)
  io.bpu_to_ftq.resp.bits.meta  := predictors.io.out.last_stage_meta // TODO: change to lastStageMeta
  io.bpu_to_ftq.resp.bits.s2.folded_hist := s2_folded_gh
  io.bpu_to_ftq.resp.bits.s2.histPtr := s2_ghist_ptr
  io.bpu_to_ftq.resp.bits.s2.ghr := s2_ghr

  npcGen.register(true.B, s0_pc_reg, Some("stallPC"), 0)
  foldedGhGen.register(true.B, s0_folded_gh_reg, Some("stallFGH"), 0)
  ghistPtrGen.register(true.B, s0_ghist_ptr_reg, Some("stallGHPtr"), 0)
  ghrGen.register(true.B, s0_ghr_reg, Some("stallGHR"), 0)

  // History management
  // s1
  val s1_possible_predicted_ghist_ptrs = (0 to numBr).map(s1_ghist_ptr - _.U)
  val s1_predicted_ghist_ptr = Mux1H(resp.s1.lastBrPosOH, s1_possible_predicted_ghist_ptrs)

  val s1_possible_predicted_fhs = (0 to numBr).map(i =>
    s1_folded_gh.update(ghv, s1_ghist_ptr, i, if (i > 0) resp.s1.taken && resp.s1.lastBrPosOH(i-1) else false.B))
  val s1_predicted_fh = Mux1H(resp.s1.lastBrPosOH, s1_possible_predicted_fhs)

  val s1_possible_predicted_ghrs = (0 to numBr).map(i => (s1_ghr << i) | resp.s1.brTaken)
  val s1_predicted_ghr = Mux1H(resp.s1.lastBrPosOH, s1_possible_predicted_ghrs)

  require(isPow2(HistoryLength))
  val s1_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b => (s1_ghist_ptr).value === n.U(log2Ceil(HistoryLength).W) + b.U && resp.s1.shouldShiftVec(b) && s1_valid))
  val s1_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b => (
        (s1_ghist_ptr).value === n.U(log2Ceil(HistoryLength).W) + b.U && resp.s1.shouldShiftVec(b),
        resp.s1.brTaken && resp.s1.lastBrPosOH(b+1)
      ))
    )
  )
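
  // Speculative global history update at s1: for every position n of the
  // circular history vector, the write enable checks whether s1's prediction
  // shifts a new outcome into that position (the pointer plus the slot index b
  // lands on n and slot b is shifted), and the written value is taken only for
  // the slot holding the last predicted branch; earlier shifted slots record
  // not-taken.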


  npcGen.register(s1_valid, resp.s1.getTarget, Some("s1_target"), 5)
  foldedGhGen.register(s1_valid, s1_predicted_fh, Some("s1_FGH"), 5)
  ghistPtrGen.register(s1_valid, s1_predicted_ghist_ptr, Some("s1_GHPtr"), 5)
  ghrGen.register(s1_valid, s1_predicted_ghr, Some("s1_GHR"), 5)
  ghvBitWriteGens.zip(s1_ghv_wens).zipWithIndex.map{case ((b, w), i) =>
    b.register(w.reduce(_||_), s1_ghv_wdatas(i), Some(s"s1_new_bit_$i"), 5)
  }

  def preds_needs_redirect_vec(x: BranchPredictionBundle, y: BranchPredictionBundle) = {
    VecInit(
      x.getTarget =/= y.getTarget,
      x.lastBrPosOH.asUInt =/= y.lastBrPosOH.asUInt,
      x.taken =/= y.taken,
      (x.taken && y.taken) && x.cfiIndex.bits =/= y.cfiIndex.bits
    )
  }
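
  // The four elements correspond, in order, to: a different predicted target,
  // a different last-branch position, a different taken/not-taken direction,
  // and a different taken-branch index when both predictions are taken. They
  // drive the s2 override decision and the per-cause perf counters below.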

  // s2
  val s2_possible_predicted_ghist_ptrs = (0 to numBr).map(s2_ghist_ptr - _.U)
  val s2_predicted_ghist_ptr = Mux1H(resp.s2.lastBrPosOH, s2_possible_predicted_ghist_ptrs)

  val s2_possible_predicted_fhs = (0 to numBr).map(i =>
    s2_folded_gh.update(ghv, s2_ghist_ptr, i, if (i > 0) resp.s2.full_pred.br_taken_mask(i-1) else false.B))
  val s2_predicted_fh = Mux1H(resp.s2.lastBrPosOH, s2_possible_predicted_fhs)

  val s2_possible_predicted_ghrs = (0 to numBr).map(i => (s2_ghr << i) | resp.s2.brTaken)
  val s2_predicted_ghr = Mux1H(resp.s2.lastBrPosOH, s2_possible_predicted_ghrs)

  val s2_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b => (s2_ghist_ptr).value === n.U(log2Ceil(HistoryLength).W) + b.U && resp.s2.shouldShiftVec(b) && s2_redirect))
  val s2_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b => (
        (s2_ghist_ptr).value === n.U(log2Ceil(HistoryLength).W) + b.U && resp.s2.shouldShiftVec(b),
        resp.s2.brTaken && resp.s2.lastBrPosOH(b+1)
      ))
    )
  )

  val previous_s1_pred = RegEnable(resp.s1, init=0.U.asTypeOf(resp.s1), s1_fire)

  val s2_redirect_s1_last_pred_vec = preds_needs_redirect_vec(previous_s1_pred, resp.s2)

  s2_redirect := s2_fire && (s2_redirect_s1_last_pred_vec.reduce(_||_) || resp.s2.fallThruError)

  // when(s2_redirect) { ghist_update(s2_ghist_ptr, resp.s2) }
  npcGen.register(s2_redirect, resp.s2.getTarget, Some("s2_target"), 4)
  foldedGhGen.register(s2_redirect, s2_predicted_fh, Some("s2_FGH"), 4)
  ghistPtrGen.register(s2_redirect, s2_predicted_ghist_ptr, Some("s2_GHPtr"), 4)
  ghrGen.register(s2_redirect, s2_predicted_ghr, Some("s2_GHR"), 4)
  ghvBitWriteGens.zip(s2_ghv_wens).zipWithIndex.map{case ((b, w), i) =>
    b.register(w.reduce(_||_), s2_ghv_wdatas(i), Some(s"s2_new_bit_$i"), 4)
  }

  XSPerfAccumulate("s2_redirect_because_target_diff", s2_fire && s2_redirect_s1_last_pred_vec(0))
  XSPerfAccumulate("s2_redirect_because_branch_num_diff", s2_fire && s2_redirect_s1_last_pred_vec(1))
  XSPerfAccumulate("s2_redirect_because_direction_diff", s2_fire && s2_redirect_s1_last_pred_vec(2))
  XSPerfAccumulate("s2_redirect_because_cfi_idx_diff", s2_fire && s2_redirect_s1_last_pred_vec(3))


  // Send signal to tell the FTQ to override
  val s2_ftq_idx = RegEnable(io.ftq_to_bpu.enq_ptr, s1_fire)

  io.bpu_to_ftq.resp.bits.s1.valid := s1_fire && !s1_flush
  io.bpu_to_ftq.resp.bits.s1.hasRedirect := false.B
  io.bpu_to_ftq.resp.bits.s1.ftq_idx := DontCare
  io.bpu_to_ftq.resp.bits.s2.valid := s2_fire && !s2_flush
  io.bpu_to_ftq.resp.bits.s2.hasRedirect := s2_redirect
  io.bpu_to_ftq.resp.bits.s2.ftq_idx := s2_ftq_idx

  val redirect = io.ftq_to_bpu.redirect.bits

  predictors.io.update := io.ftq_to_bpu.update
  predictors.io.redirect := io.ftq_to_bpu.redirect

  // Redirect logic
  val shift = redirect.cfiUpdate.shift
  val addIntoHist = redirect.cfiUpdate.addIntoHist
  // TODO: remove these below
  val shouldShiftVec = Mux(shift === 0.U, VecInit(0.U((1 << (log2Ceil(numBr) + 1)).W).asBools), VecInit((LowerMask(1.U << (shift-1.U))).asBools()))
  // TODO end

  val isBr = redirect.cfiUpdate.pd.isBr
  val taken = redirect.cfiUpdate.taken
  val real_br_taken_mask = (0 until numBr).map(i => shift === (i+1).U && taken && addIntoHist )

  val oldPtr = redirect.cfiUpdate.histPtr
  val oldFh = redirect.cfiUpdate.folded_hist
  val oldGhr = redirect.cfiUpdate.ghr
  val updated_ptr = oldPtr - shift
  val updated_fh = VecInit((0 to numBr).map(i => oldFh.update(ghv, oldPtr, i, taken && addIntoHist)))(shift)
  val updated_ghr = (oldGhr << shift) | real_br_taken_mask.reduce(_||_)
  val redirect_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b => oldPtr.value === (n.U(log2Ceil(HistoryLength).W) + b.U) && shouldShiftVec(b) && io.ftq_to_bpu.redirect.valid))
  val redirect_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b => oldPtr.value === (n.U(log2Ceil(HistoryLength).W) + b.U) && shouldShiftVec(b)),
      real_br_taken_mask
    )
  )


  // val updatedGh = oldGh.update(shift, taken && addIntoHist)

  // when(io.ftq_to_bpu.redirect.valid) { ghist_update(oldPtr, shift, taken && addIntoHist) }
  npcGen.register(io.ftq_to_bpu.redirect.valid, redirect.cfiUpdate.target, Some("redirect_target"), 2)
  foldedGhGen.register(io.ftq_to_bpu.redirect.valid, updated_fh, Some("redirect_FGHT"), 2)
  ghistPtrGen.register(io.ftq_to_bpu.redirect.valid, updated_ptr, Some("redirect_GHPtr"), 2)
  ghrGen.register(io.ftq_to_bpu.redirect.valid, updated_ghr, Some("redirect_GHR"), 2)
  ghvBitWriteGens.zip(redirect_ghv_wens).zipWithIndex.map{case ((b, w), i) =>
    b.register(w.reduce(_||_), redirect_ghv_wdatas(i), Some(s"redirect_new_bit_$i"), 2)
  }
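
  // Redirect recovery: on an FTQ redirect, the speculative history state is
  // rolled back to the checkpoint carried with the redirect (histPtr,
  // folded_hist, ghr) and re-applied with the resolved outcome: the pointer is
  // adjusted by `shift`, the folded histories and the uBTB GHR are updated with
  // the corrected taken bit, and the affected global history vector bits are
  // rewritten. When the redirect is valid it takes precedence over the s1/s2
  // speculative updates registered above.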
  // no need to assign s0_last_pred

  val need_reset = RegNext(reset.asBool) && !reset.asBool

  // Reset
  npcGen.register(need_reset, resetVector.U, Some("reset_pc"), 1)
  foldedGhGen.register(need_reset, 0.U.asTypeOf(s0_folded_gh), Some("reset_FGH"), 1)
  ghistPtrGen.register(need_reset, 0.U.asTypeOf(new CGHPtr), Some("reset_GHPtr"), 1)
  ghrGen.register(need_reset, 0.U(UbtbGHRLength.W), Some("reset_GHR"), 1)

  s0_pc         := npcGen()
  s0_pc_reg     := s0_pc
  s0_folded_gh  := foldedGhGen()
  s0_ghist_ptr  := ghistPtrGen()
  s0_ghr        := ghrGen()
  (ghv_write_datas zip ghvBitWriteGens).map{case (wd, d) => wd := d()}
  for (i <- 0 until HistoryLength) {
    ghv_wens(i) := Seq(s1_ghv_wens, s2_ghv_wens, redirect_ghv_wens).map(_(i).reduce(_||_)).reduce(_||_)
    when (ghv_wens(i)) {
      ghv(i) := ghv_write_datas(i)
    }
  }
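
  // Next-state selection: s0_pc, the folded histories, the history pointer and
  // the uBTB GHR are each driven by a priority mux over the sources registered
  // above, so exactly one source updates the speculative state per cycle, with
  // reset and FTQ redirects taking precedence over an s2 override, which in
  // turn overrides the s1 prediction and the stalled value.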

  XSDebug(RegNext(reset.asBool) && !reset.asBool, "Resetting...\n")
  XSDebug(io.ftq_to_bpu.update.valid, p"Update from ftq\n")
  XSDebug(io.ftq_to_bpu.redirect.valid, p"Redirect from ftq\n")

  XSDebug("[BP0]                 fire=%d                      pc=%x\n", s0_fire, s0_pc)
  XSDebug("[BP1] v=%d r=%d cr=%d fire=%d             flush=%d pc=%x\n",
    s1_valid, s1_ready, s1_components_ready, s1_fire, s1_flush, s1_pc)
  XSDebug("[BP2] v=%d r=%d cr=%d fire=%d redirect=%d flush=%d pc=%x\n",
  s2_valid, s2_ready, s2_components_ready, s2_fire, s2_redirect, s2_flush, s2_pc)
  XSDebug("[FTQ] ready=%d\n", io.bpu_to_ftq.resp.ready)
  XSDebug("resp.s1.target=%x\n", resp.s1.getTarget)
  XSDebug("resp.s2.target=%x\n", resp.s2.getTarget)
  // XSDebug("s0_ghist: %b\n", s0_ghist.predHist)
  // XSDebug("s1_ghist: %b\n", s1_ghist.predHist)
  // XSDebug("s2_ghist: %b\n", s2_ghist.predHist)
  // XSDebug("s2_predicted_ghist: %b\n", s2_predicted_ghist.predHist)
  XSDebug(p"s0_ghist_ptr: $s0_ghist_ptr\n")
  XSDebug(p"s1_ghist_ptr: $s1_ghist_ptr\n")
  XSDebug(p"s2_ghist_ptr: $s2_ghist_ptr\n")

  io.ftq_to_bpu.update.bits.display(io.ftq_to_bpu.update.valid)
  io.ftq_to_bpu.redirect.bits.display(io.ftq_to_bpu.redirect.valid)


  XSPerfAccumulate("s2_redirect", s2_redirect)

  val perfEvents = predictors.asInstanceOf[Composer].getPerfEvents
  generatePerfEvent()
}
557