xref: /XiangShan/src/main/scala/xiangshan/frontend/BPU.scala (revision 2f99ffddd1e71e1eb61327db791159c8e4096618)
package xiangshan.frontend

import chisel3._
import chisel3.util._
import xiangshan._
import xiangshan.utils._
import xiangshan.backend.ALUOpType
import utils._

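// TableAddr splits a virtual address into {tag, idx, offset(2)}; for a banked table the
// low idx bits select the bank and the remaining idx bits index a set inside that bank.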
class TableAddr(val idxBits: Int, val banks: Int) extends XSBundle {
  def tagBits = VAddrBits - idxBits - 2

  val tag = UInt(tagBits.W)
  val idx = UInt(idxBits.W)
  val offset = UInt(2.W)

  def fromUInt(x: UInt) = x.asTypeOf(UInt(VAddrBits.W)).asTypeOf(this)
  def getTag(x: UInt) = fromUInt(x).tag
  def getIdx(x: UInt) = fromUInt(x).idx
  def getBank(x: UInt) = getIdx(x)(log2Up(banks) - 1, 0)
  def getBankIdx(x: UInt) = getIdx(x)(idxBits - 1, log2Up(banks))
}

class Stage1To2IO extends XSBundle {
  val pc = Output(UInt(VAddrBits.W))
  val btb = new Bundle {
    val hits = Output(UInt(FetchWidth.W))
    val targets = Output(Vec(FetchWidth, UInt(VAddrBits.W)))
  }
  val jbtac = new Bundle {
    val hitIdx = Output(UInt(FetchWidth.W))
    val target = Output(UInt(VAddrBits.W))
  }
  val tage = new Bundle {
    val hits = Output(UInt(FetchWidth.W))
    val takens = Output(Vec(FetchWidth, Bool()))
  }
  val hist = Output(Vec(FetchWidth, UInt(HistoryLength.W)))
  val btbPred = ValidIO(new BranchPrediction)
}

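// Stage1 looks up the BTB, JBTAC and TAGE in parallel (one-cycle SRAM latency), returns a quick
// prediction (s1OutPred) to the IFU, and forwards the raw lookup results to Stage2.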
class BPUStage1 extends XSModule {
  val io = IO(new Bundle() {
    val in = new Bundle { val pc = Flipped(Decoupled(UInt(VAddrBits.W))) }
    // from backend
    val redirectInfo = Flipped(new RedirectInfo)
    // from Stage3
    val flush = Input(Bool())
    val s3RollBackHist = Input(UInt(HistoryLength.W))
    // to ifu, quick prediction result
    val s1OutPred = ValidIO(new BranchPrediction)
    // to Stage2
    val out = Decoupled(new Stage1To2IO)
  })

  // flush Stage1 when io.flush
  val flushS1 = BoolStopWatch(io.flush, io.in.pc.fire(), startHighPriority = true)

  // global history register
  val ghr = RegInit(0.U(HistoryLength.W))
  // modify updateGhr and newGhr when updating ghr
  val updateGhr = WireInit(false.B)
  val newGhr = WireInit(0.U(HistoryLength.W))
  when (updateGhr) { ghr := newGhr }
  // use hist as global history!!!
  val hist = Mux(updateGhr, newGhr, ghr)

  // Tage predictor
  val tage = Module(new Tage)
  tage.io.req.valid := io.in.pc.fire()
  tage.io.req.bits.pc := io.in.pc.bits
  tage.io.req.bits.hist := hist
  tage.io.redirectInfo <> io.redirectInfo
  io.out.bits.tage <> tage.io.out
  io.s1OutPred.bits.tageMeta := tage.io.meta
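  // TAGE is looked up with the same request as the BTB/JBTAC reads; its meta rides along with
  // the quick prediction (io.s1OutPred), presumably so it can be handed back on a later redirect.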

  // flush Stage1 when io.flush || io.redirect.valid
  val flush = flushS1 || io.redirectInfo.valid

  val btbAddr = new TableAddr(log2Up(BtbSets), BtbBanks)
  val predictWidth = FetchWidth
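  // A BTB entry covers one fetch line: a shared tag plus `predictWidth` per-slot records, each
  // holding a target address, a 2-bit counter and a branch type.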
  def btbTarget = new Bundle {
    val addr = UInt(VAddrBits.W)
    val pred = UInt(2.W) // 2-bit saturated counter as a quick predictor
    val _type = UInt(2.W)
    val offset = UInt(offsetBits().W) // Could be zero

    def offsetBits() = log2Up(FetchWidth / predictWidth)
  }

  def btbEntry() = new Bundle {
    val valid = Bool()
    // TODO: don't need full length of tag and target
    val tag = UInt(btbAddr.tagBits.W)
    val target = Vec(predictWidth, btbTarget)
  }

  val btb = List.fill(BtbWays)(List.fill(BtbBanks)(
    Module(new SRAMTemplate(btbEntry(), set = BtbSets / BtbBanks, shouldReset = true, holdRead = true, singlePort = false))))
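  // BtbWays x BtbBanks single-ported SRAMs; a lookup only activates the bank selected by the
  // low index bits of the fetch PC (see the read-request wiring below).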

  // val btbReadBank = btbAddr.getBank(io.in.pc.bits)

  // BTB read requests
  // the bank is selected by the low index bits of the PC, the set index by the remaining index bits
  // all BtbWays ways are read in parallel
  (0 until BtbWays).map(
    w => (0 until BtbBanks).map(
      b => {
        btb(w)(b).reset := reset.asBool
        btb(w)(b).io.r.req.valid := io.in.pc.valid && b.U === btbAddr.getBank(io.in.pc.bits)
        btb(w)(b).io.r.req.bits.setIdx := btbAddr.getBankIdx(io.in.pc.bits)
      }))

  // latch pc for 1 cycle latency when reading SRAM
  val pcLatch = RegEnable(io.in.pc.bits, io.in.pc.valid)
  // Entries read from SRAM
  val btbRead = Wire(Vec(BtbWays, Vec(BtbBanks, btbEntry())))
  // per-way hit for the latched PC (at most one of the BtbWays ways should match)
  val btbHits = Wire(Vec(BtbWays, Bool()))

  // #(predictWidth) results
  val btbTargets = Wire(Vec(predictWidth, UInt(VAddrBits.W)))
  val btbTypes = Wire(Vec(predictWidth, UInt(2.W)))
  // val btbPreds = Wire(Vec(FetchWidth, UInt(2.W)))
  val btbTakens = Wire(Vec(predictWidth, Bool()))

  val btbHitWay = Wire(UInt(log2Up(BtbWays).W))
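  // which way hit for the latched PC (recorded here, but not consumed elsewhere in this file)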

  (0 until BtbWays).map(
    w => (0 until BtbBanks).map(
      b => btbRead(w)(b) := btb(w)(b).io.r.resp.data(0)
    )
  )


  for (w <- 0 until BtbWays) {
    for (b <- 0 until BtbBanks) {
      when (b.U === btbAddr.getBank(pcLatch) && btbRead(w)(b).valid && btbRead(w)(b).tag === btbAddr.getTag(pcLatch)) {
        btbHits(w) := !flush && RegNext(btb(w)(b).io.r.req.fire(), init = false.B)
        btbHitWay := w.U
        for (i <- 0 until predictWidth) {
          btbTargets(i) := btbRead(w)(b).target(i).addr
          btbTypes(i) := btbRead(w)(b).target(i)._type
          btbTakens(i) := (btbRead(w)(b).target(i).pred)(1).asBool
        }
      }.otherwise {
        btbHits(w) := false.B
        btbHitWay := DontCare
        for (i <- 0 until predictWidth) {
          btbTargets(i) := DontCare
          btbTypes(i) := DontCare
          btbTakens(i) := DontCare
        }
      }
    }
  }


  val btbHit = btbHits.reduce(_|_)

  // Priority mux following instruction order:
  // the BTB produces only a single prediction per fetch line
  val btbTakenTarget = MuxCase(0.U, btbTakens zip btbTargets)
  val btbTakenType   = MuxCase(0.U, btbTakens zip btbTypes)
  val btbTaken       = btbTakens.reduce(_|_)
  // Record which inst is predicted taken
  val btbTakenIdx = MuxCase(0.U, btbTakens zip (0 until predictWidth).map(_.U))

  // JBTAC, split into JbtacBanks banks, predicts targets of indirect jumps other than ret.
  val jbtacAddr = new TableAddr(log2Up(JbtacSize), JbtacBanks)
  def jbtacEntry() = new Bundle {
    val valid = Bool()
    // TODO: don't need full length of tag and target
    val tag = UInt(jbtacAddr.tagBits.W)
    val target = UInt(VAddrBits.W)
    val offset = UInt(log2Up(FetchWidth).W)
  }

  val jbtac = List.fill(JbtacBanks)(Module(new SRAMTemplate(jbtacEntry(), set = JbtacSize / JbtacBanks, shouldReset = true, holdRead = true, singlePort = false)))

  val jbtacRead = Wire(Vec(JbtacBanks, jbtacEntry()))

  // register the per-bank read-request fire so it lines up with the one-cycle SRAM read latency
  val jbtacFire = RegInit(VecInit(Seq.fill(JbtacBanks)(false.B)))
  // Only read one bank
  (0 until JbtacBanks).map(
    b => {
      jbtac(b).reset := reset.asBool
      jbtac(b).io.r.req.valid := io.in.pc.valid && b.U === jbtacAddr.getBank(io.in.pc.bits)
      jbtac(b).io.r.req.bits.setIdx := jbtacAddr.getBankIdx(io.in.pc.bits)
      jbtacFire(b) := jbtac(b).io.r.req.fire()
      jbtacRead(b) := jbtac(b).io.r.resp.data(0)
    }
  )

  val jbtacBank = jbtacAddr.getBank(pcLatch)
  val jbtacHit = jbtacRead(jbtacBank).valid && jbtacRead(jbtacBank).tag === jbtacAddr.getTag(pcLatch) && !flush && jbtacFire(jbtacBank)
  val jbtacHitIdx = jbtacRead(jbtacBank).offset
  val jbtacTarget = jbtacRead(jbtacBank).target
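  // Only the bank selected by pcLatch can hit; the entry's offset field records which fetch slot
  // holds the indirect jump, and jbtacFire guards against stale read data.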


  // redirect based on BTB and JBTAC
  // (0 until FetchWidth).map(i => io.predMask(i) := btbHits(i) && Mux(btbTypes(i) === BTBtype.B, btbTakens(i), true.B) || jbtacHits(i))
  // (0 until FetchWidth).map(i => io.predTargets(i) := Mux(btbHits(i) && !(btbTypes(i) === BTBtype.B && !btbTakens(i)), btbTargets(i), jbtacTargets(i)))

  io.out.valid := RegNext(io.in.pc.valid) && !flush

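  // Quick prediction to the IFU: redirect if the BTB predicts a taken branch or the JBTAC hits,
  // taking whichever target belongs to the earlier slot in the fetch packet.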
  io.s1OutPred.valid := RegNext(io.in.pc.valid)
  io.s1OutPred.bits.redirect := btbHit && btbTaken || jbtacHit
  io.s1OutPred.bits.instrValid := (~LowerMask(btbTakenIdx, FetchWidth) & ~LowerMask(jbtacHitIdx, FetchWidth)).asTypeOf(Vec(FetchWidth, Bool()))
  io.s1OutPred.bits.target := Mux(btbTakenIdx < jbtacHitIdx, btbTakenTarget, jbtacTarget)
  io.s1OutPred.bits.hist := DontCare
  io.s1OutPred.bits.rasSp := DontCare
  io.s1OutPred.bits.rasTopCtr := DontCare

  io.out.bits.pc := pcLatch
  io.out.bits.btb.hits := btbHit
  (0 until FetchWidth).map(i => io.out.bits.btb.targets(i) := btbTargets(i))
  io.out.bits.jbtac.hitIdx := jbtacHitIdx
  io.out.bits.jbtac.target := jbtacTarget
  io.out.bits.tage := DontCare
  io.out.bits.hist := DontCare
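  // NOTE: these DontCare assignments come after `io.out.bits.tage <> tage.io.out` above; with
  // Chisel's last-connect semantics they win, so Stage2/3 currently receive DontCare for the
  // TAGE result and the history.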
  io.out.bits.btbPred := io.s1OutPred


  // TODO: delete this!!!
  io.in.pc.ready := true.B

}

class Stage2To3IO extends Stage1To2IO {
}

class BPUStage2 extends XSModule {
  val io = IO(new Bundle() {
    // flush from Stage3
    val flush = Input(Bool())
    val in = Flipped(Decoupled(new Stage1To2IO))
    val out = Decoupled(new Stage2To3IO)
  })

  // flush Stage2 when Stage3 or the backend redirects
  val flushS2 = BoolStopWatch(io.flush, io.in.fire(), startHighPriority = true)
  io.out.valid := !flushS2 && RegNext(io.in.fire())
  io.in.ready := !io.out.valid || io.out.fire()

  // do nothing
  io.out.bits := RegEnable(io.in.bits, io.in.fire())
}

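// Stage3 combines the latched Stage1/2 results with the predecode information to pick the first
// taken control-flow instruction in the fetch line, maintains the return address stack, and
// flushes the earlier stages when its decision differs from Stage1's quick prediction.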
class BPUStage3 extends XSModule {
  val io = IO(new Bundle() {
    val flush = Input(Bool())
    val in = Flipped(Decoupled(new Stage2To3IO))
    val out = ValidIO(new BranchPrediction)
    // from icache
    val predecode = Flipped(ValidIO(new Predecode))
    // from backend
    val redirectInfo = Flipped(new RedirectInfo)
    // to Stage1 and Stage2
    val flushBPU = Output(Bool())
    // to Stage1, restore ghr in stage1 when flushBPU is valid
    val s1RollBackHist = Output(UInt(HistoryLength.W))
  })

  val flushS3 = BoolStopWatch(io.flush, io.in.fire(), startHighPriority = true)
  val inLatch = RegInit(0.U.asTypeOf(io.in.bits))
  val validLatch = RegInit(false.B)
  when (io.in.fire()) { inLatch := io.in.bits }
  when (io.in.fire()) {
    validLatch := !io.flush
  }.elsewhen (io.out.valid) {
    validLatch := false.B
  }
  io.out.valid := validLatch && io.predecode.valid && !flushS3
  io.in.ready := !validLatch || io.out.valid

  // RAS
  // TODO: split retAddr and ctr
  def rasEntry() = new Bundle {
    val retAddr = UInt(VAddrBits.W)
    val ctr = UInt(8.W) // counts how many times this return address has been pushed (nested/recursive calls)
  }
  val ras = RegInit(VecInit(Seq.fill(RasSize)(0.U.asTypeOf(rasEntry()))))
  val sp = Counter(RasSize)
  val rasTop = ras(sp.value)
  val rasTopAddr = rasTop.retAddr

  // get the first taken branch/jal/call/jalr/ret in a fetch line
  // brTakenIdx/jalIdx/callIdx/jalrIdx/retIdx/jmpIdx are one-hot encoded.
  // brNotTakenIdx indicates all the not-taken branches before the first jump instruction.
  val brIdx = inLatch.btb.hits & Cat(io.predecode.bits.fuTypes.map { t => ALUOpType.isBranch(t) }).asUInt & io.predecode.bits.mask
  val brTakenIdx = LowestBit(brIdx & inLatch.tage.takens.asUInt, FetchWidth)
  val jalIdx = LowestBit(inLatch.btb.hits & Cat(io.predecode.bits.fuTypes.map { t => t === ALUOpType.jal }).asUInt & io.predecode.bits.mask, FetchWidth)
  val callIdx = LowestBit(inLatch.btb.hits & io.predecode.bits.mask & Cat(io.predecode.bits.fuTypes.map { t => t === ALUOpType.call }).asUInt, FetchWidth)
  val jalrIdx = LowestBit(inLatch.jbtac.hitIdx & io.predecode.bits.mask & Cat(io.predecode.bits.fuTypes.map { t => t === ALUOpType.jalr }).asUInt, FetchWidth)
  val retIdx = LowestBit(io.predecode.bits.mask & Cat(io.predecode.bits.fuTypes.map { t => t === ALUOpType.ret }).asUInt, FetchWidth)

  val jmpIdx = LowestBit(brTakenIdx | jalIdx | callIdx | jalrIdx | retIdx, FetchWidth)
  val brNotTakenIdx = brIdx & ~inLatch.tage.takens.asUInt & LowerMask(jmpIdx, FetchWidth)

  io.out.bits.redirect := jmpIdx.orR.asBool // overridden below once the result is compared against Stage1's prediction
  io.out.bits.target := Mux(jmpIdx === retIdx, rasTopAddr,
    Mux(jmpIdx === jalrIdx, inLatch.jbtac.target,
    Mux(jmpIdx === 0.U, inLatch.pc + 4.U, // TODO: RVC
    PriorityMux(jmpIdx, inLatch.btb.targets))))
  io.out.bits.instrValid := LowerMask(jmpIdx, FetchWidth).asTypeOf(Vec(FetchWidth, Bool()))
  io.out.bits.tageMeta := inLatch.btbPred.bits.tageMeta
  //io.out.bits._type := Mux(jmpIdx === retIdx, BTBtype.R,
  //  Mux(jmpIdx === jalrIdx, BTBtype.I,
  //  Mux(jmpIdx === brTakenIdx, BTBtype.B, BTBtype.J)))
  val firstHist = inLatch.btbPred.bits.hist(0)
  // there may be several not-taken branches before the first jump instruction,
  // so we need to calculate how many zeroes each instruction should shift into its global history.
  // each history excludes the instruction's own jump direction.
  val histShift = Wire(Vec(FetchWidth, UInt(log2Up(FetchWidth).W)))
  val shift = Wire(Vec(FetchWidth, Vec(FetchWidth, UInt(1.W))))
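  // shift(i)(j) is 1 when slot i (i < j) holds a not-taken branch; summing over i gives how many
  // zero history bits instruction j has to shift in on top of firstHist.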
  (0 until FetchWidth).map(i => shift(i) := Mux(!brNotTakenIdx(i), 0.U, ~LowerMask(UIntToOH(i.U), FetchWidth)).asTypeOf(Vec(FetchWidth, UInt(1.W))))
  for (j <- 0 until FetchWidth) {
    var tmp = 0.U
    for (i <- 0 until FetchWidth) {
      tmp = tmp + shift(i)(j)
    }
    histShift(j) := tmp
  }
  (0 until FetchWidth).map(i => io.out.bits.hist(i) := firstHist << histShift(i))
  // save ras checkpoint info
  io.out.bits.rasSp := sp.value
  io.out.bits.rasTopCtr := rasTop.ctr

  // flush BPU and redirect when target differs from the target predicted in Stage1
  io.out.bits.redirect := inLatch.btbPred.bits.redirect ^ jmpIdx.orR.asBool ||
    inLatch.btbPred.bits.redirect && jmpIdx.orR.asBool && io.out.bits.target =/= inLatch.btbPred.bits.target
  io.flushBPU := io.out.bits.redirect && io.out.valid

  // speculatively update the RAS
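  // - call: push (call PC + 4); if the top already holds that address, just bump its counter
  //   instead of allocating a new entry (compresses direct recursion).
  // - ret:  pop by decrementing the counter, or move the stack pointer down once it reaches one.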
  val rasWrite = WireInit(0.U.asTypeOf(rasEntry()))
  rasWrite.retAddr := inLatch.pc + (OHToUInt(callIdx) << 2.U) + 4.U // the instruction after the call
  val allocNewEntry = rasWrite.retAddr =/= rasTopAddr
  rasWrite.ctr := Mux(allocNewEntry, 1.U, rasTop.ctr + 1.U)
  when (io.out.valid) {
    when (jmpIdx === callIdx) {
      ras(Mux(allocNewEntry, sp.value + 1.U, sp.value)) := rasWrite
      when (allocNewEntry) { sp.value := sp.value + 1.U }
    }.elsewhen (jmpIdx === retIdx) {
      when (rasTop.ctr === 1.U) {
        sp.value := Mux(sp.value === 0.U, 0.U, sp.value - 1.U)
      }.otherwise {
        ras(sp.value).ctr := rasTop.ctr - 1.U // keep the return address, just decrement the counter
      }
    }
  }
  // use checkpoint to recover RAS
  val recoverSp = io.redirectInfo.redirect.rasSp
  val recoverCtr = io.redirectInfo.redirect.rasTopCtr
  when (io.redirectInfo.valid && io.redirectInfo.misPred) {
    sp.value := recoverSp
    ras(recoverSp).ctr := recoverCtr
  }

  // roll back global history in S1 if S3 redirects
  io.s1RollBackHist := PriorityMux(jmpIdx, io.out.bits.hist)
}

class BPU extends XSModule {
  val io = IO(new Bundle() {
    // from backend
    // flush pipeline if misPred and update bpu based on redirect signals from brq
    val redirectInfo = Flipped(new RedirectInfo)

    val in = new Bundle { val pc = Flipped(Valid(UInt(VAddrBits.W))) }

    val btbOut = ValidIO(new BranchPrediction)
    val tageOut = ValidIO(new BranchPrediction)

    // predecode info from icache
    // TODO: simplify this after the predecode unit is implemented
    val predecode = Flipped(ValidIO(new Predecode))
  })

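  // Wire the three stages in series. Stage3 can flush Stage1/2 and roll back Stage1's global
  // history; a backend redirect flushes all three stages.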
  val s1 = Module(new BPUStage1)
  val s2 = Module(new BPUStage2)
  val s3 = Module(new BPUStage3)

  s1.io.redirectInfo <> io.redirectInfo
  s1.io.flush := s3.io.flushBPU || io.redirectInfo.flush()
  s1.io.in.pc.valid := io.in.pc.valid
  s1.io.in.pc.bits <> io.in.pc.bits
  io.btbOut <> s1.io.s1OutPred
  s1.io.s3RollBackHist := s3.io.s1RollBackHist

  s1.io.out <> s2.io.in
  s2.io.flush := s3.io.flushBPU || io.redirectInfo.flush()

  s2.io.out <> s3.io.in
  s3.io.flush := io.redirectInfo.flush()
  s3.io.predecode <> io.predecode
  io.tageOut <> s3.io.out
  s3.io.redirectInfo <> io.redirectInfo
}