/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.frontend

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import utils._
import utility._

import scala.math.min
import scala.{Tuple2 => &}

trait FTBParams extends HasXSParameter with HasBPUConst {
  val numEntries = FtbSize
  val numWays    = FtbWays
  val numSets    = numEntries / numWays // 512
  val tagSize    = 20

  val TAR_STAT_SZ = 2
  def TAR_FIT = 0.U(TAR_STAT_SZ.W)
  def TAR_OVF = 1.U(TAR_STAT_SZ.W)
  def TAR_UDF = 2.U(TAR_STAT_SZ.W)

  def BR_OFFSET_LEN  = 12
  def JMP_OFFSET_LEN = 20

  def FTBCLOSE_THRESHOLD_SZ = log2Ceil(500)
  def FTBCLOSE_THRESHOLD    = 500.U(FTBCLOSE_THRESHOLD_SZ.W) // can be modified
}

class FtbSlot_FtqMem(implicit p: Parameters) extends XSBundle with FTBParams {
  val offset  = UInt(log2Ceil(PredictWidth).W)
  val sharing = Bool()
  val valid   = Bool()
}

class FtbSlot(val offsetLen: Int, val subOffsetLen: Option[Int] = None)(implicit p: Parameters)
  extends FtbSlot_FtqMem with FTBParams {
  if (subOffsetLen.isDefined) {
    require(subOffsetLen.get <= offsetLen)
  }
  val lower   = UInt(offsetLen.W)
  val tarStat = UInt(TAR_STAT_SZ.W)

  def setLowerStatByTarget(pc: UInt, target: UInt, isShare: Boolean) = {
    def getTargetStatByHigher(pc_higher: UInt, target_higher: UInt) =
      Mux(target_higher > pc_higher, TAR_OVF,
        Mux(target_higher < pc_higher, TAR_UDF, TAR_FIT))
    def getLowerByTarget(target: UInt, offsetLen: Int) = target(offsetLen, 1)
    val offLen        = if (isShare) this.subOffsetLen.get else this.offsetLen
    val pc_higher     = pc(VAddrBits - 1, offLen + 1)
    val target_higher = target(VAddrBits - 1, offLen + 1)
    val stat          = getTargetStatByHigher(pc_higher, target_higher)
    val lower         = ZeroExt(getLowerByTarget(target, offLen), this.offsetLen)
    this.lower   := lower
    this.tarStat := stat
    this.sharing := isShare.B
  }
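  // How a slot compresses its target: only target(offLen, 1) is kept in
  // `lower`, and `tarStat` records how the remaining high bits relate to the
  // pc. Illustrative example (addresses shortened for readability) with
  // offLen = BR_OFFSET_LEN = 12, i.e. one `higher` value per 2^13-byte region:
  //   pc     = 0x8000_1FFC  =>  pc(VAddrBits-1, 13)     = 0x4_0000
  //   target = 0x8000_2004  =>  target(VAddrBits-1, 13) = 0x4_0001
  // so lower = target(12, 1) = 0x002 and tarStat = TAR_OVF; getTarget below
  // reconstructs the target by selecting higher / higher+1 / higher-1.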
  def getTarget(pc: UInt, last_stage: Option[Tuple2[UInt, Bool]] = None) = {
    def getTarget(offLen: Int)(pc: UInt, lower: UInt, stat: UInt,
      last_stage: Option[Tuple2[UInt, Bool]] = None) = {
      val h                = pc(VAddrBits - 1, offLen + 1)
      val higher           = Wire(UInt((VAddrBits - offLen - 1).W))
      val higher_plus_one  = Wire(UInt((VAddrBits - offLen - 1).W))
      val higher_minus_one = Wire(UInt((VAddrBits - offLen - 1).W))

      // Switch between the previous-stage pc and the current-stage pc
      // to give flexibility for timing
      if (last_stage.isDefined) {
        val last_stage_pc   = last_stage.get._1
        val last_stage_pc_h = last_stage_pc(VAddrBits - 1, offLen + 1)
        val stage_en        = last_stage.get._2
        higher           := RegEnable(last_stage_pc_h, stage_en)
        higher_plus_one  := RegEnable(last_stage_pc_h + 1.U, stage_en)
        higher_minus_one := RegEnable(last_stage_pc_h - 1.U, stage_en)
      } else {
        higher           := h
        higher_plus_one  := h + 1.U
        higher_minus_one := h - 1.U
      }
      val target =
        Cat(
          Mux1H(Seq(
            (stat === TAR_OVF, higher_plus_one),
            (stat === TAR_UDF, higher_minus_one),
            (stat === TAR_FIT, higher),
          )),
          lower(offLen - 1, 0), 0.U(1.W)
        )
      require(target.getWidth == VAddrBits)
      require(offLen != 0)
      target
    }
    if (subOffsetLen.isDefined)
      Mux(sharing,
        getTarget(subOffsetLen.get)(pc, lower, tarStat, last_stage),
        getTarget(offsetLen)(pc, lower, tarStat, last_stage)
      )
    else
      getTarget(offsetLen)(pc, lower, tarStat, last_stage)
  }

  def fromAnotherSlot(that: FtbSlot) = {
    require(
      this.offsetLen > that.offsetLen && this.subOffsetLen.map(_ == that.offsetLen).getOrElse(true) ||
      this.offsetLen == that.offsetLen
    )
    this.offset  := that.offset
    this.tarStat := that.tarStat
    this.sharing := (this.offsetLen > that.offsetLen && that.offsetLen == this.subOffsetLen.get).B
    this.valid   := that.valid
    this.lower   := ZeroExt(that.lower, this.offsetLen)
  }

  def slotConsistent(that: FtbSlot) = {
    VecInit(
      this.offset  === that.offset,
      this.lower   === that.lower,
      this.tarStat === that.tarStat,
      this.sharing === that.sharing,
      this.valid   === that.valid
    ).reduce(_&&_)
  }

}

class FTBEntry_part(implicit p: Parameters) extends XSBundle with FTBParams with BPUUtils {
  val isCall = Bool()
  val isRet  = Bool()
  val isJalr = Bool()

  def isJal = !isJalr
}

class FTBEntry_FtqMem(implicit p: Parameters) extends FTBEntry_part with FTBParams with BPUUtils {

  val brSlots  = Vec(numBrSlot, new FtbSlot_FtqMem)
  val tailSlot = new FtbSlot_FtqMem

  def jmpValid = {
    tailSlot.valid && !tailSlot.sharing
  }

  def getBrRecordedVec(offset: UInt) = {
    VecInit(
      brSlots.map(s => s.valid && s.offset === offset) :+
      (tailSlot.valid && tailSlot.offset === offset && tailSlot.sharing)
    )
  }

  def brIsSaved(offset: UInt) = getBrRecordedVec(offset).reduce(_||_)

  def getBrMaskByOffset(offset: UInt) =
    brSlots.map{ s => s.valid && s.offset <= offset } :+
    (tailSlot.valid && tailSlot.offset <= offset && tailSlot.sharing)

  def newBrCanNotInsert(offset: UInt) = {
    val lastSlotForBr = tailSlot
    lastSlotForBr.valid && lastSlotForBr.offset < offset
  }

}

class FTBEntry(implicit p: Parameters) extends FTBEntry_part with FTBParams with BPUUtils {

  val valid = Bool()

  val brSlots = Vec(numBrSlot, new FtbSlot(BR_OFFSET_LEN))

  val tailSlot = new FtbSlot(JMP_OFFSET_LEN, Some(BR_OFFSET_LEN))

  // Partial Fall-Through Address
  val pftAddr = UInt(log2Up(PredictWidth).W)
  val carry   = Bool()

  val last_may_be_rvi_call = Bool()

  val always_taken = Vec(numBr, Bool())

  def getSlotForBr(idx: Int): FtbSlot = {
    require(idx <= numBr - 1)
    (idx, numBr) match {
      case (i, n) if i == n - 1 => this.tailSlot
      case _                    => this.brSlots(idx)
    }
  }
  def allSlotsForBr = {
    (0 until numBr).map(getSlotForBr(_))
  }
  def setByBrTarget(brIdx: Int, pc: UInt, target: UInt) = {
    val slot = getSlotForBr(brIdx)
    slot.setLowerStatByTarget(pc, target, brIdx == numBr - 1)
  }
  def setByJmpTarget(pc: UInt, target: UInt) = {
    this.tailSlot.setLowerStatByTarget(pc, target, false)
  }
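  /*
    Worked example of the adder sharing in getTargetVec below (values
    illustrative): with BR_OFFSET_LEN = 12 and JMP_OFFSET_LEN = 20, the pc
    splits as
      pc = [ higher (VAddrBits-21 bits) | middle (8 bits) | low 13 bits ].
    A br target needs higher / higher+1 / higher-1 on the bits above bit 12;
    these are rebuilt from the jmp-width pieces as
      {higher, middle} + 1 = Mux(carry-out of middle+1,
                                 {higher+1, middle+1}, {higher, middle+1})
    (and likewise for -1), so the narrow middle adders are shared by all br
    slots instead of each slot keeping its own full-width staged adders.
  */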
  def getTargetVec(pc: UInt, last_stage: Option[Tuple2[UInt, Bool]] = None) = {
    /*
      Previous design: each slot used FtbSlot.getTarget to compute its target
      separately, which generated nine sets of registers staging the values of
      higher, higher+1 and higher-1.
      Current design: the duplicated registers are shared. We stage the high
      bits common to the br targets and the jmp target (last_stage_pc_higher),
      plus the middle bits that may carry into them (last_stage_pc_middle),
      compute middle+1 and middle-1 once, and concatenate the pieces according
      to the carry-out to recover the br and jmp targets.
    */
    val h_br                = pc(VAddrBits - 1, BR_OFFSET_LEN + 1)
    val higher_br           = Wire(UInt((VAddrBits - BR_OFFSET_LEN - 1).W))
    val higher_plus_one_br  = Wire(UInt((VAddrBits - BR_OFFSET_LEN - 1).W))
    val higher_minus_one_br = Wire(UInt((VAddrBits - BR_OFFSET_LEN - 1).W))
    val h_tail                = pc(VAddrBits - 1, JMP_OFFSET_LEN + 1)
    val higher_tail           = Wire(UInt((VAddrBits - JMP_OFFSET_LEN - 1).W))
    val higher_plus_one_tail  = Wire(UInt((VAddrBits - JMP_OFFSET_LEN - 1).W))
    val higher_minus_one_tail = Wire(UInt((VAddrBits - JMP_OFFSET_LEN - 1).W))
    if (last_stage.isDefined) {
      val last_stage_pc = last_stage.get._1
      val stage_en      = last_stage.get._2
      val last_stage_pc_higher           = RegEnable(last_stage_pc(VAddrBits - 1, JMP_OFFSET_LEN + 1), stage_en)
      val last_stage_pc_middle           = RegEnable(last_stage_pc(JMP_OFFSET_LEN, BR_OFFSET_LEN + 1), stage_en)
      val last_stage_pc_higher_plus_one  = RegEnable(last_stage_pc(VAddrBits - 1, JMP_OFFSET_LEN + 1) + 1.U, stage_en)
      val last_stage_pc_higher_minus_one = RegEnable(last_stage_pc(VAddrBits - 1, JMP_OFFSET_LEN + 1) - 1.U, stage_en)
      val last_stage_pc_middle_plus_one  = RegEnable(Cat(0.U(1.W), last_stage_pc(JMP_OFFSET_LEN, BR_OFFSET_LEN + 1)) + 1.U, stage_en)
      val last_stage_pc_middle_minus_one = RegEnable(Cat(0.U(1.W), last_stage_pc(JMP_OFFSET_LEN, BR_OFFSET_LEN + 1)) - 1.U, stage_en)

      higher_br := Cat(last_stage_pc_higher, last_stage_pc_middle)
      higher_plus_one_br := Mux(
        last_stage_pc_middle_plus_one(JMP_OFFSET_LEN - BR_OFFSET_LEN),
        Cat(last_stage_pc_higher_plus_one, last_stage_pc_middle_plus_one(JMP_OFFSET_LEN - BR_OFFSET_LEN - 1, 0)),
        Cat(last_stage_pc_higher, last_stage_pc_middle_plus_one(JMP_OFFSET_LEN - BR_OFFSET_LEN - 1, 0)))
      higher_minus_one_br := Mux(
        last_stage_pc_middle_minus_one(JMP_OFFSET_LEN - BR_OFFSET_LEN),
        Cat(last_stage_pc_higher_minus_one, last_stage_pc_middle_minus_one(JMP_OFFSET_LEN - BR_OFFSET_LEN - 1, 0)),
        Cat(last_stage_pc_higher, last_stage_pc_middle_minus_one(JMP_OFFSET_LEN - BR_OFFSET_LEN - 1, 0)))

      higher_tail           := last_stage_pc_higher
      higher_plus_one_tail  := last_stage_pc_higher_plus_one
      higher_minus_one_tail := last_stage_pc_higher_minus_one
    } else {
      higher_br             := h_br
      higher_plus_one_br    := h_br + 1.U
      higher_minus_one_br   := h_br - 1.U
      higher_tail           := h_tail
      higher_plus_one_tail  := h_tail + 1.U
      higher_minus_one_tail := h_tail - 1.U
    }
    val br_slots_targets = VecInit(brSlots.map(s =>
      Cat(
        Mux1H(Seq(
          (s.tarStat === TAR_OVF, higher_plus_one_br),
          (s.tarStat === TAR_UDF, higher_minus_one_br),
          (s.tarStat === TAR_FIT, higher_br),
        )),
        s.lower(s.offsetLen - 1, 0), 0.U(1.W)
      )
    ))
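    // The tail slot is polymorphic: when `sharing` is set it holds a br whose
    // lower bits were stored at BR_OFFSET_LEN width, so the br-width high-bit
    // pieces must be used; otherwise it holds a jmp with the full
    // JMP_OFFSET_LEN lower bits.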
    val tail_target = Wire(UInt(VAddrBits.W))
    if (tailSlot.subOffsetLen.isDefined) {
      tail_target := Mux(tailSlot.sharing,
        Cat(
          Mux1H(Seq(
            (tailSlot.tarStat === TAR_OVF, higher_plus_one_br),
            (tailSlot.tarStat === TAR_UDF, higher_minus_one_br),
            (tailSlot.tarStat === TAR_FIT, higher_br),
          )),
          tailSlot.lower(tailSlot.subOffsetLen.get - 1, 0), 0.U(1.W)
        ),
        Cat(
          Mux1H(Seq(
            (tailSlot.tarStat === TAR_OVF, higher_plus_one_tail),
            (tailSlot.tarStat === TAR_UDF, higher_minus_one_tail),
            (tailSlot.tarStat === TAR_FIT, higher_tail),
          )),
          tailSlot.lower(tailSlot.offsetLen - 1, 0), 0.U(1.W)
        )
      )
    } else {
      tail_target := Cat(
        Mux1H(Seq(
          (tailSlot.tarStat === TAR_OVF, higher_plus_one_tail),
          (tailSlot.tarStat === TAR_UDF, higher_minus_one_tail),
          (tailSlot.tarStat === TAR_FIT, higher_tail),
        )),
        tailSlot.lower(tailSlot.offsetLen - 1, 0), 0.U(1.W)
      )
    }

    br_slots_targets.map(t => require(t.getWidth == VAddrBits))
    require(tail_target.getWidth == VAddrBits)
    val targets = VecInit(br_slots_targets :+ tail_target)
    targets
  }

  def getOffsetVec = VecInit(brSlots.map(_.offset) :+ tailSlot.offset)
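  // getFallThrough below rebuilds the full fall-through address from pftAddr
  // (the offset of the fall-through within the fetch region) and `carry`
  // (set when the fall-through wraps into the next region, so the upper pc
  // bits are incremented); the actual splice is done by getFallThroughAddr
  // from BPUUtils.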
  def getFallThrough(pc: UInt, last_stage_entry: Option[Tuple2[FTBEntry, Bool]] = None) = {
    if (last_stage_entry.isDefined) {
      val stashed_carry = RegEnable(last_stage_entry.get._1.carry, last_stage_entry.get._2)
      getFallThroughAddr(pc, stashed_carry, pftAddr)
    } else {
      getFallThroughAddr(pc, carry, pftAddr)
    }
  }

  def hasBr(offset: UInt) =
    brSlots.map{ s => s.valid && s.offset <= offset }.reduce(_||_) ||
    (tailSlot.valid && tailSlot.offset <= offset && tailSlot.sharing)

  def getBrMaskByOffset(offset: UInt) =
    brSlots.map{ s => s.valid && s.offset <= offset } :+
    (tailSlot.valid && tailSlot.offset <= offset && tailSlot.sharing)

  def getBrRecordedVec(offset: UInt) = {
    VecInit(
      brSlots.map(s => s.valid && s.offset === offset) :+
      (tailSlot.valid && tailSlot.offset === offset && tailSlot.sharing)
    )
  }

  def brIsSaved(offset: UInt) = getBrRecordedVec(offset).reduce(_||_)

  def brValids = {
    VecInit(
      brSlots.map(_.valid) :+ (tailSlot.valid && tailSlot.sharing)
    )
  }

  def noEmptySlotForNewBr = {
    VecInit(brSlots.map(_.valid) :+ tailSlot.valid).reduce(_&&_)
  }

  def newBrCanNotInsert(offset: UInt) = {
    val lastSlotForBr = tailSlot
    lastSlotForBr.valid && lastSlotForBr.offset < offset
  }

  def jmpValid = {
    tailSlot.valid && !tailSlot.sharing
  }

  def brOffset = {
    VecInit(brSlots.map(_.offset) :+ tailSlot.offset)
  }

  def entryConsistent(that: FTBEntry) = {
    val validDiff = this.valid === that.valid
    val brSlotsDiffSeq: IndexedSeq[Bool] =
      this.brSlots.zip(that.brSlots).map{
        case (x, y) => x.slotConsistent(y)
      }
    val tailSlotDiff = this.tailSlot.slotConsistent(that.tailSlot)
    val pftAddrDiff  = this.pftAddr === that.pftAddr
    val carryDiff    = this.carry === that.carry
    val isCallDiff   = this.isCall === that.isCall
    val isRetDiff    = this.isRet === that.isRet
    val isJalrDiff   = this.isJalr === that.isJalr
    val lastMayBeRviCallDiff = this.last_may_be_rvi_call === that.last_may_be_rvi_call
    val alwaysTakenDiff: IndexedSeq[Bool] =
      this.always_taken.zip(that.always_taken).map{
        case (x, y) => x === y
      }
    VecInit(
      validDiff,
      brSlotsDiffSeq.reduce(_&&_),
      tailSlotDiff,
      pftAddrDiff,
      carryDiff,
      isCallDiff,
      isRetDiff,
      isJalrDiff,
      lastMayBeRviCallDiff,
      alwaysTakenDiff.reduce(_&&_)
    ).reduce(_&&_)
  }

  def display(cond: Bool): Unit = {
    XSDebug(cond, p"-----------FTB entry----------- \n")
    XSDebug(cond, p"v=${valid}\n")
    for (i <- 0 until numBr) {
      XSDebug(cond, p"[br$i]: v=${allSlotsForBr(i).valid}, offset=${allSlotsForBr(i).offset}," +
        p"lower=${Hexadecimal(allSlotsForBr(i).lower)}\n")
    }
    XSDebug(cond, p"[tailSlot]: v=${tailSlot.valid}, offset=${tailSlot.offset}," +
      p"lower=${Hexadecimal(tailSlot.lower)}, sharing=${tailSlot.sharing}\n")
    XSDebug(cond, p"pftAddr=${Hexadecimal(pftAddr)}, carry=$carry\n")
    XSDebug(cond, p"isCall=$isCall, isRet=$isRet, isJalr=$isJalr\n")
    XSDebug(cond, p"last_may_be_rvi_call=$last_may_be_rvi_call\n")
    XSDebug(cond, p"------------------------------- \n")
  }

}

class FTBEntryWithTag(implicit p: Parameters) extends XSBundle with FTBParams with BPUUtils {
  val entry = new FTBEntry
  val tag   = UInt(tagSize.W)
  def display(cond: Bool): Unit = {
    entry.display(cond)
    XSDebug(cond, p"tag is ${Hexadecimal(tag)}\n------------------------------- \n")
  }
}

class FTBMeta(implicit p: Parameters) extends XSBundle with FTBParams {
  val writeWay   = UInt(log2Ceil(numWays).W)
  val hit        = Bool()
  val pred_cycle = if (!env.FPGAPlatform) Some(UInt(64.W)) else None
}

object FTBMeta {
  def apply(writeWay: UInt, hit: Bool, pred_cycle: UInt)(implicit p: Parameters): FTBMeta = {
    val e = Wire(new FTBMeta)
    e.writeWay := writeWay
    e.hit      := hit
    e.pred_cycle.map(_ := pred_cycle)
    e
  }
}

// class UpdateQueueEntry(implicit p: Parameters) extends XSBundle with FTBParams {
//   val pc = UInt(VAddrBits.W)
//   val ftb_entry = new FTBEntry
//   val hit = Bool()
//   val hit_way = UInt(log2Ceil(numWays).W)
// }
//
// object UpdateQueueEntry {
//   def apply(pc: UInt, fe: FTBEntry, hit: Bool, hit_way: UInt)(implicit p: Parameters): UpdateQueueEntry = {
//     val e = Wire(new UpdateQueueEntry)
//     e.pc := pc
//     e.ftb_entry := fe
//     e.hit := hit
//     e.hit_way := hit_way
//     e
//   }
// }

class FTB(implicit p: Parameters) extends BasePredictor with FTBParams with BPUUtils
  with HasCircularQueuePtrHelper with HasPerfEvents {
  override val meta_size = WireInit(0.U.asTypeOf(new FTBMeta)).getWidth

  val ftbAddr = new TableAddr(log2Up(numSets), 1)

  class FTBBank(val numSets: Int, val nWays: Int) extends XSModule with BPUUtils {
    val io = IO(new Bundle {
      val s1_fire = Input(Bool())

      // when the ftb hits, read_hits.valid is true and read_hits.bits is the hit way;
      // when the ftb misses, read_hits.valid is false
      // val read_hits = Valid(Vec(numWays, Bool()))
      val req_pc    = Flipped(DecoupledIO(UInt(VAddrBits.W)))
      val read_resp = Output(new FTBEntry)
      val read_hits = Valid(UInt(log2Ceil(numWays).W))

      val read_multi_entry = Output(new FTBEntry)
      val read_multi_hits  = Valid(UInt(log2Ceil(numWays).W))

      val u_req_pc      = Flipped(DecoupledIO(UInt(VAddrBits.W)))
      val update_hits   = Valid(UInt(log2Ceil(numWays).W))
      val update_access = Input(Bool())

      val update_pc          = Input(UInt(VAddrBits.W))
      val update_write_data  = Flipped(Valid(new FTBEntryWithTag))
      val update_write_way   = Input(UInt(log2Ceil(numWays).W))
      val update_write_alloc = Input(Bool())
    })

    // The holdRead logic is extracted here to fix the bug where an update read
    // overrides the predict read result
    val ftb = Module(new SRAMTemplate(new FTBEntryWithTag, set = numSets, way = numWays, shouldReset = true, holdRead = false, singlePort = true))
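    // The SRAM is single-ported with holdRead = false, so read data is valid
    // for one cycle only. HoldUnless below keeps the last predict-read data
    // stable while the port is occupied by an update read; the assert further
    // down guarantees predict and update reads never collide in a cycle.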
    val ftb_r_entries = ftb.io.r.resp.data.map(_.entry)

    val pred_rdata = HoldUnless(ftb.io.r.resp.data, RegNext(io.req_pc.valid && !io.update_access))
    ftb.io.r.req.valid       := io.req_pc.valid || io.u_req_pc.valid // io.s0_fire
    ftb.io.r.req.bits.setIdx := Mux(io.u_req_pc.valid, ftbAddr.getIdx(io.u_req_pc.bits), ftbAddr.getIdx(io.req_pc.bits)) // s0_idx

    assert(!(io.req_pc.valid && io.u_req_pc.valid))

    io.req_pc.ready   := ftb.io.r.req.ready
    io.u_req_pc.ready := ftb.io.r.req.ready

    val req_tag = RegEnable(ftbAddr.getTag(io.req_pc.bits)(tagSize - 1, 0), io.req_pc.valid)
    val req_idx = RegEnable(ftbAddr.getIdx(io.req_pc.bits), io.req_pc.valid)

    val u_req_tag = RegEnable(ftbAddr.getTag(io.u_req_pc.bits)(tagSize - 1, 0), io.u_req_pc.valid)

    val read_entries = pred_rdata.map(_.entry)
    val read_tags    = pred_rdata.map(_.tag)

    val total_hits = VecInit((0 until numWays).map(b => read_tags(b) === req_tag && read_entries(b).valid && io.s1_fire))
    val hit = total_hits.reduce(_||_)
    // val hit_way_1h = VecInit(PriorityEncoderOH(total_hits))
    val hit_way = OHToUInt(total_hits)

    // There may be two hits among the four ways of the ftbBank, in which case
    // OHToUInt fails. If a redirect happens in s2 at that moment, the wrong
    // FTBEntry is used to calculate the target, producing a wrong address and
    // hurting performance. The solution is to select one hitting entry during
    // a multi-hit as the entry for s2; considering timing, that entry is used
    // in s3 and triggers an s3 redirect.
    val total_hits_reg   = RegEnable(total_hits, io.s1_fire)
    val read_entries_reg = read_entries.map(w => RegEnable(w, io.s1_fire))

    val multi_hit = VecInit((0 until numWays).map{
      i => (0 until numWays).map(j => {
        if (i < j) total_hits_reg(i) && total_hits_reg(j)
        else false.B
      }).reduce(_||_)
    }).reduce(_||_)
    val multi_way = PriorityMux(Seq.tabulate(numWays)(i => ((total_hits_reg(i)) -> i.asUInt(log2Ceil(numWays).W))))
    val multi_hit_selectEntry = PriorityMux(Seq.tabulate(numWays)(i => ((total_hits_reg(i)) -> read_entries_reg(i))))
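    // Concretely, OHToUInt ORs together the indices of all set bits, so a
    // double hit such as total_hits = b0110 (ways 1 and 2) would decode to
    // way 1 | 2 = 3, a way that did not hit at all. The PriorityMux above
    // therefore re-selects a single genuinely hitting way and its entry.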
    // Check if the entry read by ftbBank is legal.
    for (n <- 0 until numWays) {
      val req_pc_reg = RegEnable(io.req_pc.bits, io.req_pc.valid)
      val ftb_entry_fallThrough = read_entries(n).getFallThrough(req_pc_reg)
      when(read_entries(n).valid && total_hits(n) && io.s1_fire) {
        assert(req_pc_reg + (2 * PredictWidth).U >= ftb_entry_fallThrough, s"FTB sram entry in way${n} fallThrough address error!")
      }
    }

    val u_total_hits = VecInit((0 until numWays).map(b =>
      ftb.io.r.resp.data(b).tag === u_req_tag && ftb.io.r.resp.data(b).entry.valid && RegNext(io.update_access)))
    val u_hit = u_total_hits.reduce(_||_)
    // val hit_way_1h = VecInit(PriorityEncoderOH(total_hits))
    val u_hit_way = OHToUInt(u_total_hits)

    // assert(PopCount(total_hits) === 1.U || PopCount(total_hits) === 0.U)
    // assert(PopCount(u_total_hits) === 1.U || PopCount(u_total_hits) === 0.U)
    for (n <- 1 to numWays) {
      XSPerfAccumulate(f"ftb_pred_${n}_way_hit", PopCount(total_hits) === n.U)
      XSPerfAccumulate(f"ftb_update_${n}_way_hit", PopCount(u_total_hits) === n.U)
    }

    val replacer = ReplacementPolicy.fromString(Some("setplru"), numWays, numSets)
    // val allocWriteWay = replacer.way(req_idx)

    val touch_set = Seq.fill(1)(Wire(UInt(log2Ceil(numSets).W)))
    val touch_way = Seq.fill(1)(Wire(Valid(UInt(log2Ceil(numWays).W))))

    val write_set = Wire(UInt(log2Ceil(numSets).W))
    val write_way = Wire(Valid(UInt(log2Ceil(numWays).W)))

    val read_set = Wire(UInt(log2Ceil(numSets).W))
    val read_way = Wire(Valid(UInt(log2Ceil(numWays).W)))

    read_set       := req_idx
    read_way.valid := hit
    read_way.bits  := hit_way

    // Read replacer access is postponed for 1 cycle
    // this helps timing
    touch_set(0)       := Mux(write_way.valid, write_set, RegNext(read_set))
    touch_way(0).valid := write_way.valid || RegNext(read_way.valid)
    touch_way(0).bits  := Mux(write_way.valid, write_way.bits, RegNext(read_way.bits))

    replacer.access(touch_set, touch_way)
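    // Only one touch port exists, so when a (delayed) read touch and a write
    // touch arrive in the same cycle the write wins the Mux above; presumably
    // dropping a read touch only slightly perturbs PLRU state, while dropping
    // a write touch could let the just-written way be evicted immediately.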
    // Select the update allocate way
    // Selection logic:
    //    1. if any entry within the same index is invalid, select it
    //    2. if all entries are valid, use the replacer
    def allocWay(valids: UInt, idx: UInt): UInt = {
      if (numWays > 1) {
        val w = Wire(UInt(log2Up(numWays).W))
        val valid = WireInit(valids.andR)
        w := Mux(valid, replacer.way(idx), PriorityEncoder(~valids))
        w
      } else {
        val w = WireInit(0.U(log2Up(numWays).W))
        w
      }
    }

    io.read_resp       := Mux1H(total_hits, read_entries) // Mux1H
    io.read_hits.valid := hit
    io.read_hits.bits  := hit_way

    io.read_multi_entry      := multi_hit_selectEntry
    io.read_multi_hits.valid := multi_hit
    io.read_multi_hits.bits  := multi_way

    io.update_hits.valid := u_hit
    io.update_hits.bits  := u_hit_way

    // Update logic
    val u_valid = io.update_write_data.valid
    val u_data  = io.update_write_data.bits
    val u_idx   = ftbAddr.getIdx(io.update_pc)
    val allocWriteWay = allocWay(RegNext(VecInit(ftb_r_entries.map(_.valid))).asUInt, u_idx)
    val u_way  = Mux(io.update_write_alloc, allocWriteWay, io.update_write_way)
    val u_mask = UIntToOH(u_way)

    for (i <- 0 until numWays) {
      XSPerfAccumulate(f"ftb_replace_way$i", u_valid && io.update_write_alloc && u_way === i.U)
      XSPerfAccumulate(f"ftb_replace_way${i}_has_empty", u_valid && io.update_write_alloc && !ftb_r_entries.map(_.valid).reduce(_&&_) && u_way === i.U)
      XSPerfAccumulate(f"ftb_hit_way$i", hit && !io.update_access && hit_way === i.U)
    }

    ftb.io.w.apply(u_valid, u_data, u_idx, u_mask)

    // for replacer
    write_set       := u_idx
    write_way.valid := u_valid
    write_way.bits  := Mux(io.update_write_alloc, allocWriteWay, io.update_write_way)

    // print hit entry info
    Mux1H(total_hits, ftb.io.r.resp.data).display(true.B)
  } // FTBBank

  // FTB switch register & temporary storage of fauftb prediction results
  val s0_close_ftb_req = RegInit(false.B)
  val s1_close_ftb_req = RegEnable(s0_close_ftb_req, false.B, io.s0_fire(0))
  val s2_close_ftb_req = RegEnable(s1_close_ftb_req, false.B, io.s1_fire(0))
  val s2_fauftb_ftb_entry_dup     = io.s1_fire.map(f => RegEnable(io.fauftb_entry_in, f))
  val s2_fauftb_ftb_entry_hit_dup = io.s1_fire.map(f => RegEnable(io.fauftb_entry_hit_in, f))

  val ftbBank = Module(new FTBBank(numSets, numWays))

  // gate the read request when the ftb is closed
  ftbBank.io.req_pc.valid := io.s0_fire(0) && !s0_close_ftb_req
  ftbBank.io.req_pc.bits  := s0_pc_dup(0)

  val s2_multi_hit        = ftbBank.io.read_multi_hits.valid && io.s2_fire(0)
  val s2_multi_hit_way    = ftbBank.io.read_multi_hits.bits
  val s2_multi_hit_entry  = ftbBank.io.read_multi_entry
  val s2_multi_hit_enable = s2_multi_hit && io.s2_redirect(0)
  XSPerfAccumulate("ftb_s2_multi_hit", s2_multi_hit)
  XSPerfAccumulate("ftb_s2_multi_hit_enable", s2_multi_hit_enable)

  // After the ftb is closed, the entry output at s2 is the FauFTB entry cached at s1
  val btb_enable_dup   = dup(RegNext(io.ctrl.btb_enable))
  val s1_read_resp     = Mux(s1_close_ftb_req, io.fauftb_entry_in, ftbBank.io.read_resp)
  val s2_ftbBank_dup   = io.s1_fire.map(f => RegEnable(ftbBank.io.read_resp, f))
  val s2_ftb_entry_dup = dup(0.U.asTypeOf(new FTBEntry))
  for (((s2_fauftb_entry, s2_ftbBank_entry), s2_ftb_entry) <-
    s2_fauftb_ftb_entry_dup zip s2_ftbBank_dup zip s2_ftb_entry_dup) {
    s2_ftb_entry := Mux(s2_close_ftb_req, s2_fauftb_entry, s2_ftbBank_entry)
  }
  val s3_ftb_entry_dup = io.s2_fire.zip(s2_ftb_entry_dup).map {case (f, e) => RegEnable(Mux(s2_multi_hit_enable, s2_multi_hit_entry, e), f)}
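  // The *_dup signals are per-pipeline-copy duplicates, one per element of
  // io.s1_fire / io.s2_fire; the duplication is presumably there to trade a
  // little area for lower fanout on timing-critical predictor paths.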
  // After the ftb is closed, the hit output at s2 is the FauFTB hit cached at s1.
  // s1_hit is the ftbBank hit.
  val s1_hit         = Mux(s1_close_ftb_req, false.B, ftbBank.io.read_hits.valid && io.ctrl.btb_enable)
  val s2_ftb_hit_dup = io.s1_fire.map(f => RegEnable(s1_hit, 0.B, f))
  val s2_hit_dup     = dup(0.U.asTypeOf(Bool()))
  for (((s2_fauftb_hit, s2_ftb_hit), s2_hit) <-
    s2_fauftb_ftb_entry_hit_dup zip s2_ftb_hit_dup zip s2_hit_dup) {
    s2_hit := Mux(s2_close_ftb_req, s2_fauftb_hit, s2_ftb_hit)
  }
  val s3_hit_dup      = io.s2_fire.zip(s2_hit_dup).map {case (f, h) => RegEnable(Mux(s2_multi_hit_enable, s2_multi_hit, h), 0.B, f)}
  val s3_mult_hit_dup = io.s2_fire.map(f => RegEnable(s2_multi_hit_enable, f))
  val writeWay          = Mux(s1_close_ftb_req, 0.U, ftbBank.io.read_hits.bits)
  val s2_ftb_meta       = RegEnable(FTBMeta(writeWay.asUInt, s1_hit, GTimer()).asUInt, io.s1_fire(0))
  val s2_multi_hit_meta = FTBMeta(s2_multi_hit_way.asUInt, s2_multi_hit, GTimer()).asUInt

  // Count consecutive cycles in which the FauFTB and FTB entries are consistent
  val fauftb_ftb_entry_consistent_counter = RegInit(0.U(FTBCLOSE_THRESHOLD_SZ.W))
  val fauftb_ftb_entry_consistent         = s2_fauftb_ftb_entry_dup(0).entryConsistent(s2_ftbBank_dup(0))

  // when the ftb req is already closed, the counter needs to keep its value
  when(io.s2_fire(0) && s2_fauftb_ftb_entry_hit_dup(0) && s2_ftb_hit_dup(0)) {
    fauftb_ftb_entry_consistent_counter := Mux(fauftb_ftb_entry_consistent, fauftb_ftb_entry_consistent_counter + 1.U, 0.U)
  } .elsewhen(io.s2_fire(0) && !s2_fauftb_ftb_entry_hit_dup(0) && s2_ftb_hit_dup(0)) {
    fauftb_ftb_entry_consistent_counter := 0.U
  }

  when((fauftb_ftb_entry_consistent_counter >= FTBCLOSE_THRESHOLD) && io.s0_fire(0)) {
    s0_close_ftb_req := true.B
  }

  // Clear the counter on a false hit or an IFU redirect
  val ftb_false_hit = WireInit(false.B)
  val needReopen    = s0_close_ftb_req && (ftb_false_hit || io.redirectFromIFU)
  ftb_false_hit := io.update.valid && io.update.bits.false_hit
  when(needReopen) {
    fauftb_ftb_entry_consistent_counter := 0.U
    s0_close_ftb_req := false.B
  }

  val s2_close_consistent     = s2_fauftb_ftb_entry_dup(0).entryConsistent(s2_ftb_entry_dup(0))
  val s2_not_close_consistent = s2_ftbBank_dup(0).entryConsistent(s2_ftb_entry_dup(0))

  when(s2_close_ftb_req && io.s2_fire(0)) {
    assert(s2_close_consistent, s"Entry inconsistency after ftb req is closed!")
  }.elsewhen(!s2_close_ftb_req && io.s2_fire(0)) {
    assert(s2_not_close_consistent, s"Entry inconsistency after ftb req is not closed!")
  }

  val reopenCounter         = !s1_close_ftb_req && s2_close_ftb_req && io.s2_fire(0)
  val falseHitReopenCounter = ftb_false_hit && s1_close_ftb_req
  XSPerfAccumulate("ftb_req_reopen_counter", reopenCounter)
  XSPerfAccumulate("false_hit_reopen_Counter", falseHitReopenCounter)
  XSPerfAccumulate("ifuRedirec_needReopen", s1_close_ftb_req && io.redirectFromIFU)
  XSPerfAccumulate("this_cycle_is_close", s2_close_ftb_req && io.s2_fire(0))
  XSPerfAccumulate("this_cycle_is_open", !s2_close_ftb_req && io.s2_fire(0))

  // io.out.bits.resp := RegEnable(io.in.bits.resp_in(0), 0.U.asTypeOf(new BranchPredictionResp), io.s1_fire)
  io.out := io.in.bits.resp_in(0)

  io.out.s2.full_pred.map {case fp => fp.multiHit := false.B}

  io.out.s2.full_pred.zip(s2_hit_dup).map {case (fp, h) => fp.hit := h}
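  // The `&` in the for-patterns below is Tuple2, via `import scala.{Tuple2 => &}`
  // at the top of this file: a generator pattern like
  //   full_pred & entry & pc <- xs zip ys zip zs
  // destructures ((full_pred, entry), pc) without nested parentheses.
  // Plain-Scala illustration:
  //   for (a & b <- Seq(1) zip Seq(2)) yield a + b   // Seq(3)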
  for (full_pred & s2_ftb_entry & s2_pc & s1_pc & s1_fire <-
    io.out.s2.full_pred zip s2_ftb_entry_dup zip s2_pc_dup zip s1_pc_dup zip io.s1_fire) {
    full_pred.fromFtbEntry(s2_ftb_entry,
      s2_pc.getAddr(),
      // Previous stage meta for better timing
      Some(s1_pc, s1_fire),
      Some(s1_read_resp, s1_fire)
    )
  }

  io.out.s3.full_pred.zip(s3_hit_dup).map {case (fp, h) => fp.hit := h}
  io.out.s3.full_pred.zip(s3_mult_hit_dup).map {case (fp, m) => fp.multiHit := m}
  for (full_pred & s3_ftb_entry & s3_pc & s2_pc & s2_fire <-
    io.out.s3.full_pred zip s3_ftb_entry_dup zip s3_pc_dup zip s2_pc_dup zip io.s2_fire)
    full_pred.fromFtbEntry(s3_ftb_entry, s3_pc.getAddr(), Some((s2_pc.getAddr(), s2_fire)))

  io.out.last_stage_ftb_entry := s3_ftb_entry_dup(0)
  io.out.last_stage_meta      := RegEnable(Mux(s2_multi_hit_enable, s2_multi_hit_meta, s2_ftb_meta), io.s2_fire(0))
  io.out.s1_ftbCloseReq       := s1_close_ftb_req
  io.out.s1_uftbHit           := io.fauftb_entry_hit_in
  val s1_uftbHasIndirect = io.fauftb_entry_in.jmpValid &&
    io.fauftb_entry_in.isJalr && !io.fauftb_entry_in.isRet // the uFTB determines it is a real JALR; RET and JAL are excluded
  io.out.s1_uftbHasIndirect := s1_uftbHasIndirect

  // always taken logic
  for (i <- 0 until numBr) {
    for (out_fp & in_fp & s2_hit & s2_ftb_entry <-
      io.out.s2.full_pred zip io.in.bits.resp_in(0).s2.full_pred zip s2_hit_dup zip s2_ftb_entry_dup)
      out_fp.br_taken_mask(i) := in_fp.br_taken_mask(i) || s2_hit && s2_ftb_entry.always_taken(i)
    for (out_fp & in_fp & s3_hit & s3_ftb_entry <-
      io.out.s3.full_pred zip io.in.bits.resp_in(0).s3.full_pred zip s3_hit_dup zip s3_ftb_entry_dup)
      out_fp.br_taken_mask(i) := in_fp.br_taken_mask(i) || s3_hit && s3_ftb_entry.always_taken(i)
  }

  // Update logic
  val update = io.update.bits

  val u_meta  = update.meta.asTypeOf(new FTBMeta)
  val u_valid = io.update.valid && !io.update.bits.old_entry

  val (_, delay2_pc)    = DelayNWithValid(update.pc, u_valid, 2)
  val (_, delay2_entry) = DelayNWithValid(update.ftb_entry, u_valid, 2)
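  // Update timeline: if the prediction meta already recorded a hit
  // (u_meta.hit), the entry is written back right away (update_now) to the
  // way saved in the meta. Otherwise the bank first spends one cycle
  // re-reading the set (u_req_pc) to look for a tag hit, and the write
  // happens two cycles after u_valid using the delayed pc/entry above,
  // targeting either the re-discovered way or a freshly allocated one;
  // io.s1_ready stalls predict reads while that update read is in flight.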
  val update_now       = u_valid && u_meta.hit
  val update_need_read = u_valid && !u_meta.hit
  // stall one more cycle because a whole cycle is spent on the update read tag check
  io.s1_ready := ftbBank.io.req_pc.ready && !(update_need_read) && !RegNext(update_need_read)

  ftbBank.io.u_req_pc.valid := update_need_read
  ftbBank.io.u_req_pc.bits  := update.pc

  val ftb_write = Wire(new FTBEntryWithTag)
  ftb_write.entry := Mux(update_now, update.ftb_entry, delay2_entry)
  ftb_write.tag   := ftbAddr.getTag(Mux(update_now, update.pc, delay2_pc))(tagSize - 1, 0)

  val write_valid = update_now || DelayN(u_valid && !u_meta.hit, 2)
  val write_pc    = Mux(update_now, update.pc, delay2_pc)

  ftbBank.io.update_write_data.valid := write_valid
  ftbBank.io.update_write_data.bits  := ftb_write
  ftbBank.io.update_pc          := write_pc
  ftbBank.io.update_write_way   := Mux(update_now, u_meta.writeWay, RegNext(ftbBank.io.update_hits.bits)) // use it one cycle later
  ftbBank.io.update_write_alloc := Mux(update_now, false.B, RegNext(!ftbBank.io.update_hits.valid)) // use it one cycle later
  ftbBank.io.update_access := u_valid && !u_meta.hit
  ftbBank.io.s1_fire := io.s1_fire(0)

  val ftb_write_fallThrough = ftb_write.entry.getFallThrough(write_pc)
  when(write_valid) {
    assert(write_pc + (FetchWidth * 4).U >= ftb_write_fallThrough, s"FTB write_entry fallThrough address error!")
  }

  XSDebug("req_v=%b, req_pc=%x, ready=%b (resp at next cycle)\n", io.s0_fire(0), s0_pc_dup(0), ftbBank.io.req_pc.ready)
  XSDebug("s2_hit=%b, hit_way=%b\n", s2_hit_dup(0), writeWay.asUInt)
  XSDebug("s2_br_taken_mask=%b, s2_real_taken_mask=%b\n",
    io.in.bits.resp_in(0).s2.full_pred(0).br_taken_mask.asUInt, io.out.s2.full_pred(0).real_slot_taken_mask().asUInt)
  XSDebug("s2_target=%x\n", io.out.s2.getTarget(0))

  s2_ftb_entry_dup(0).display(true.B)

  XSPerfAccumulate("ftb_read_hits", RegNext(io.s0_fire(0)) && s1_hit)
  XSPerfAccumulate("ftb_read_misses", RegNext(io.s0_fire(0)) && !s1_hit)

  XSPerfAccumulate("ftb_commit_hits", io.update.valid && u_meta.hit)
  XSPerfAccumulate("ftb_commit_misses", io.update.valid && !u_meta.hit)

  XSPerfAccumulate("ftb_update_req", io.update.valid)
  XSPerfAccumulate("ftb_update_ignored", io.update.valid && io.update.bits.old_entry)
  XSPerfAccumulate("ftb_updated", u_valid)

  override val perfEvents = Seq(
    ("ftb_commit_hits   ", io.update.valid && u_meta.hit),
    ("ftb_commit_misses ", io.update.valid && !u_meta.hit),
  )
  generatePerfEvent()
}