/***************************************************************************************
* Copyright (c) 2024 Beijing Institute of Open Source Chip (BOSC)
* Copyright (c) 2020-2024 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/
package xiangshan.frontend

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import utility._
import xiangshan._
import xiangshan.frontend._

class RASEntry()(implicit p: Parameters) extends XSBundle {
  val retAddr = UInt(VAddrBits.W)
  val ctr     = UInt(RasCtrSize.W) // counts repeated calls to the same return address (recursion compression)
  def =/=(that: RASEntry) = this.retAddr =/= that.retAddr || this.ctr =/= that.ctr
}

class RASPtr(implicit p: Parameters) extends CircularQueuePtr[RASPtr](
  p => p(XSCoreParamsKey).RasSpecSize
) {}

object RASPtr {
  def apply(f: Bool, v: UInt)(implicit p: Parameters): RASPtr = {
    val ptr = Wire(new RASPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
  def inverse(ptr: RASPtr)(implicit p: Parameters): RASPtr = {
    apply(!ptr.flag, ptr.value)
  }
}

class RASInternalMeta(implicit p: Parameters) extends XSBundle {
  val ssp  = UInt(log2Up(RasSize).W)
  val sctr = UInt(RasCtrSize.W)
  val TOSW = new RASPtr
  val TOSR = new RASPtr
  val NOS  = new RASPtr
}

object RASInternalMeta {
  def apply(ssp: UInt, sctr: UInt, TOSW: RASPtr, TOSR: RASPtr, NOS: RASPtr)(implicit p: Parameters): RASInternalMeta = {
    val e = Wire(new RASInternalMeta)
    e.ssp  := ssp
    e.sctr := sctr
    e.TOSW := TOSW
    e.TOSR := TOSR
    e.NOS  := NOS
    e
  }
}

class RASMeta(implicit p: Parameters) extends XSBundle {
  val ssp  = UInt(log2Up(RasSize).W)
  val TOSW = new RASPtr
}

object RASMeta {
  def apply(ssp: UInt, sctr: UInt, TOSW: RASPtr, TOSR: RASPtr, NOS: RASPtr)(implicit p: Parameters): RASMeta = {
    val e = Wire(new RASMeta)
    e.ssp  := ssp
    e.TOSW := TOSW
    e
  }
}

class RASDebug(implicit p: Parameters) extends XSBundle {
  val spec_queue   = Output(Vec(RasSpecSize, new RASEntry))
  val spec_nos     = Output(Vec(RasSpecSize, new RASPtr))
  val commit_stack = Output(Vec(RasSize, new RASEntry))
}
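// Structure of the predictor below: an architectural commit stack
// (commit_stack, indexed by nsp) that is updated only at instruction commit,
// plus a speculative part organized as a circular queue (spec_queue) whose
// entries are linked into a stack through per-entry next-on-stack pointers
// (spec_nos). Pointer glossary, as used throughout this file:
//   TOSW - next write position of the spec queue (top of stack for writes)
//   TOSR - current speculative top of stack (top of stack for reads)
//   NOS  - for each speculative entry, the pointer to the entry below it
//   BOS  - bottom of the in-flight (not yet committed) region of the queue
//   ssp  - speculative view of the commit stack top
//   nsp  - committed commit stack top
//   sctr - speculative copy of the top entry's recursion counter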
class RAS(implicit p: Parameters) extends BasePredictor {
  override val meta_size = WireInit(0.U.asTypeOf(new RASMeta)).getWidth

  object RASEntry {
    def apply(retAddr: UInt, ctr: UInt): RASEntry = {
      val e = Wire(new RASEntry)
      e.retAddr := retAddr
      e.ctr := ctr
      e
    }
  }

  class RASStack(rasSize: Int, rasSpecSize: Int) extends XSModule with HasCircularQueuePtrHelper {
    val io = IO(new Bundle {
      val spec_push_valid = Input(Bool())
      val spec_pop_valid  = Input(Bool())
      val spec_push_addr  = Input(UInt(VAddrBits.W))
      // for write bypass between s2 and s3
      val s2_fire        = Input(Bool())
      val s3_fire        = Input(Bool())
      val s3_cancel      = Input(Bool())
      val s3_meta        = Input(new RASInternalMeta)
      val s3_missed_pop  = Input(Bool())
      val s3_missed_push = Input(Bool())
      val s3_pushAddr    = Input(UInt(VAddrBits.W))
      val spec_pop_addr  = Output(UInt(VAddrBits.W))

      val commit_push_valid = Input(Bool())
      val commit_pop_valid  = Input(Bool())
      val commit_push_addr  = Input(UInt(VAddrBits.W))
      val commit_meta_TOSW  = Input(new RASPtr)
      // for debug purpose only
      val commit_meta_ssp = Input(UInt(log2Up(RasSize).W))

      val redirect_valid     = Input(Bool())
      val redirect_isCall    = Input(Bool())
      val redirect_isRet     = Input(Bool())
      val redirect_meta_ssp  = Input(UInt(log2Up(RasSize).W))
      val redirect_meta_sctr = Input(UInt(RasCtrSize.W))
      val redirect_meta_TOSW = Input(new RASPtr)
      val redirect_meta_TOSR = Input(new RASPtr)
      val redirect_meta_NOS  = Input(new RASPtr)
      val redirect_callAddr  = Input(UInt(VAddrBits.W))

      val ssp  = Output(UInt(log2Up(RasSize).W))
      val sctr = Output(UInt(RasCtrSize.W))
      val nsp  = Output(UInt(log2Up(RasSize).W))
      val TOSR = Output(new RASPtr)
      val TOSW = Output(new RASPtr)
      val NOS  = Output(new RASPtr)
      val BOS  = Output(new RASPtr)

      val debug = new RASDebug
    })

    val commit_stack = RegInit(VecInit(Seq.fill(RasSize)(RASEntry(0.U, 0.U))))
    val spec_queue   = RegInit(VecInit(Seq.fill(rasSpecSize)(RASEntry(0.U, 0.U))))
    val spec_nos     = RegInit(VecInit(Seq.fill(rasSpecSize)(RASPtr(false.B, 0.U))))

    val nsp = RegInit(0.U(log2Up(rasSize).W))
    val ssp = RegInit(0.U(log2Up(rasSize).W))

    val sctr = RegInit(0.U(RasCtrSize.W))
    val TOSR = RegInit(RASPtr(true.B, (RasSpecSize - 1).U))
    val TOSW = RegInit(RASPtr(false.B, 0.U))
    val BOS  = RegInit(RASPtr(false.B, 0.U))

    val spec_overflowed = RegInit(false.B)

    val writeBypassEntry = Reg(new RASEntry)
    val writeBypassNos   = Reg(new RASPtr)

    val writeBypassValid     = RegInit(0.B)
    val writeBypassValidWire = Wire(Bool())

    def TOSRinRange(currentTOSR: RASPtr, currentTOSW: RASPtr) = {
      val inflightValid = WireInit(false.B)
      // if in range, TOSR should be no younger than BOS and strictly younger than TOSW
      when (!isBefore(currentTOSR, BOS) && isBefore(currentTOSR, currentTOSW)) {
        inflightValid := true.B
      }
      inflightValid
    }

    def getCommitTop(currentSsp: UInt) = {
      commit_stack(currentSsp)
    }

    def getTopNos(currentTOSR: RASPtr, allowBypass: Boolean): RASPtr = {
      val ret = Wire(new RASPtr)
      if (allowBypass) {
        when (writeBypassValid) {
          ret := writeBypassNos
        } .otherwise {
          ret := spec_nos(currentTOSR.value)
        }
      } else {
        ret := spec_nos(currentTOSR.value) // invalid when TOSR is not in range
      }
      ret
    }

    def getTop(currentSsp: UInt, currentSctr: UInt, currentTOSR: RASPtr, currentTOSW: RASPtr, allowBypass: Boolean): RASEntry = {
      val ret = Wire(new RASEntry)
      if (allowBypass) {
        when (writeBypassValid) {
          ret := writeBypassEntry
        } .elsewhen (TOSRinRange(currentTOSR, currentTOSW)) {
          ret := spec_queue(currentTOSR.value)
        } .otherwise {
          ret := getCommitTop(currentSsp)
        }
      } else {
        when (TOSRinRange(currentTOSR, currentTOSW)) {
          ret := spec_queue(currentTOSR.value)
        } .otherwise {
          ret := getCommitTop(currentSsp)
        }
      }
      ret
    }
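    // Recursion compression, worked example (assuming an empty stack and a
    // sufficiently large RasCtrSize): pushing return address A three times in
    // a row yields a single top entry (A, ctr = 2) instead of three entries;
    // each pop first decrements ctr and only removes the entry once ctr is 0.
    // This is the rule implemented by writeEntry.ctr and specPush/specPop below.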
    // specPtr manipulation would be unsafe if the spec size were not a power of 2
    assert(log2Up(RasSpecSize) == log2Floor(RasSpecSize))
    def ctrMax = ((1L << RasCtrSize) - 1).U
    def ptrInc(ptr: UInt) = ptr + 1.U
    def ptrDec(ptr: UInt) = ptr - 1.U

    def specPtrInc(ptr: RASPtr) = ptr + 1.U
    def specPtrDec(ptr: RASPtr) = ptr - 1.U

    when (io.redirect_valid && io.redirect_isCall) {
      writeBypassValidWire := true.B
      writeBypassValid := true.B
    } .elsewhen (io.redirect_valid) {
      // clear the current top write bypass when redirecting
      writeBypassValidWire := false.B
      writeBypassValid := false.B
    } .elsewhen (io.s2_fire) {
      writeBypassValidWire := io.spec_push_valid
      writeBypassValid := io.spec_push_valid
    } .elsewhen (io.s3_fire) {
      writeBypassValidWire := false.B
      writeBypassValid := false.B
    } .otherwise {
      writeBypassValidWire := writeBypassValid
    }

    val topEntry = getTop(ssp, sctr, TOSR, TOSW, true)
    val topNos = getTopNos(TOSR, true)
    val redirectTopEntry = getTop(io.redirect_meta_ssp, io.redirect_meta_sctr, io.redirect_meta_TOSR, io.redirect_meta_TOSW, false)
    val redirectTopNos = io.redirect_meta_NOS
    val s3TopEntry = getTop(io.s3_meta.ssp, io.s3_meta.sctr, io.s3_meta.TOSR, io.s3_meta.TOSW, false)
    val s3TopNos = io.s3_meta.NOS

    val writeEntry = Wire(new RASEntry)
    val writeNos = Wire(new RASPtr)
    writeEntry.retAddr := Mux(io.redirect_valid && io.redirect_isCall, io.redirect_callAddr, io.spec_push_addr)
    writeEntry.ctr := Mux(io.redirect_valid && io.redirect_isCall,
      Mux(redirectTopEntry.retAddr === io.redirect_callAddr && redirectTopEntry.ctr < ctrMax, io.redirect_meta_sctr + 1.U, 0.U),
      Mux(topEntry.retAddr === io.spec_push_addr && topEntry.ctr < ctrMax, sctr + 1.U, 0.U))

    writeNos := Mux(io.redirect_valid && io.redirect_isCall,
      io.redirect_meta_TOSR, TOSR)

    when (io.spec_push_valid || (io.redirect_valid && io.redirect_isCall)) {
      writeBypassEntry := writeEntry
      writeBypassNos := writeNos
    }

    val realPush = Wire(Bool())
    val realWriteEntry = Wire(new RASEntry)
    val timingTop = RegInit(0.U.asTypeOf(new RASEntry))
    val timingNos = RegInit(0.U.asTypeOf(new RASPtr))
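    // Why the write bypass exists: a push predicted in s2 is only written into
    // spec_queue when s3 fires (realPush below), so for one cycle the newly
    // pushed entry lives solely in writeBypassEntry/writeBypassNos. getTop and
    // getTopNos consult the bypass first so that a return immediately
    // following the call still sees the just-pushed address.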
    when (writeBypassValidWire) {
      when ((io.redirect_valid && io.redirect_isCall) || io.spec_push_valid) {
        timingTop := writeEntry
        timingNos := writeNos
      } .otherwise {
        timingTop := writeBypassEntry
        timingNos := writeBypassNos
      }
    } .elsewhen (io.redirect_valid && io.redirect_isRet) {
      // getTop using redirect Nos as TOSR
      val popRedSsp = Wire(UInt(log2Up(rasSize).W))
      val popRedSctr = Wire(UInt(RasCtrSize.W))
      val popRedTOSR = io.redirect_meta_NOS
      val popRedTOSW = io.redirect_meta_TOSW

      when (io.redirect_meta_sctr > 0.U) {
        popRedSctr := io.redirect_meta_sctr - 1.U
        popRedSsp := io.redirect_meta_ssp
      } .elsewhen (TOSRinRange(popRedTOSR, TOSW)) {
        popRedSsp := ptrDec(io.redirect_meta_ssp)
        popRedSctr := spec_queue(popRedTOSR.value).ctr
      } .otherwise {
        popRedSsp := ptrDec(io.redirect_meta_ssp)
        popRedSctr := getCommitTop(ptrDec(io.redirect_meta_ssp)).ctr
      }
      // we are deciding the top for the next cycle, so there is no need for the bypass here
      timingTop := getTop(popRedSsp, popRedSctr, popRedTOSR, popRedTOSW, false)
    } .elsewhen (io.redirect_valid) {
      // neither call nor ret
      val popSsp = io.redirect_meta_ssp
      val popSctr = io.redirect_meta_sctr
      val popTOSR = io.redirect_meta_TOSR
      val popTOSW = io.redirect_meta_TOSW

      timingTop := getTop(popSsp, popSctr, popTOSR, popTOSW, false)
    } .elsewhen (io.spec_pop_valid) {
      // getTop using current Nos as TOSR
      val popSsp = Wire(UInt(log2Up(rasSize).W))
      val popSctr = Wire(UInt(RasCtrSize.W))
      val popTOSR = topNos
      val popTOSW = TOSW

      when (sctr > 0.U) {
        popSctr := sctr - 1.U
        popSsp := ssp
      } .elsewhen (TOSRinRange(popTOSR, TOSW)) {
        popSsp := ptrDec(ssp)
        popSctr := spec_queue(popTOSR.value).ctr
      } .otherwise {
        popSsp := ptrDec(ssp)
        popSctr := getCommitTop(ptrDec(ssp)).ctr
      }
      // we are deciding the top for the next cycle, so there is no need for the bypass here
      timingTop := getTop(popSsp, popSctr, popTOSR, popTOSW, false)
    } .elsewhen (realPush) {
      // the spec queue is being written this cycle, so we cannot read the new top from it
      timingTop := realWriteEntry
    } .elsewhen (io.s3_cancel) {
      // s3 differs from s2
      timingTop := getTop(io.s3_meta.ssp, io.s3_meta.sctr, io.s3_meta.TOSR, io.s3_meta.TOSW, false)
      when (io.s3_missed_push) {
        val writeEntry_s3 = Wire(new RASEntry)
        timingTop := writeEntry_s3
        writeEntry_s3.retAddr := io.s3_pushAddr
        writeEntry_s3.ctr := Mux(timingTop.retAddr === io.s3_pushAddr && io.s3_meta.sctr < ctrMax, io.s3_meta.sctr + 1.U, 0.U)
      } .elsewhen (io.s3_missed_pop) {
        val popRedSsp_s3 = Wire(UInt(log2Up(rasSize).W))
        val popRedSctr_s3 = Wire(UInt(RasCtrSize.W))
        val popRedTOSR_s3 = io.s3_meta.NOS
        val popRedTOSW_s3 = io.s3_meta.TOSW

        when (io.s3_meta.sctr > 0.U) {
          popRedSctr_s3 := io.s3_meta.sctr - 1.U
          popRedSsp_s3 := io.s3_meta.ssp
        } .elsewhen (TOSRinRange(popRedTOSR_s3, popRedTOSW_s3)) {
          popRedSsp_s3 := ptrDec(io.s3_meta.ssp)
          popRedSctr_s3 := spec_queue(popRedTOSR_s3.value).ctr
        } .otherwise {
          popRedSsp_s3 := ptrDec(io.s3_meta.ssp)
          popRedSctr_s3 := getCommitTop(ptrDec(io.s3_meta.ssp)).ctr
        }
        // we are deciding the top for the next cycle, so there is no need for the bypass here
        timingTop := getTop(popRedSsp_s3, popRedSctr_s3, popRedTOSR_s3, popRedTOSW_s3, false)
      }
    } .otherwise {
      // easy case: the top for the next cycle is simply the current top
      val popSsp = ssp
      val popSctr = sctr
      val popTOSR = TOSR
      val popTOSW = TOSW
      timingTop := getTop(popSsp, popSctr, popTOSR, popTOSW, false)
    }
    val diffTop = Mux(writeBypassValid, writeBypassEntry.retAddr, topEntry.retAddr)

    XSPerfAccumulate("ras_top_mismatch", diffTop =/= timingTop.retAddr)
    // the two may differ when there are more pops than pushes and the commit stack is updated with inflight info
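    // The when-chain above maintains timingTop, a registered prediction of the
    // *next* cycle's top entry: every branch computes what the top will be
    // after this cycle's push/pop/redirect/cancel takes effect, so that
    // io.spec_pop_addr below can be driven straight from a register for timing.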
    val realWriteEntry_next = RegEnable(writeEntry, io.s2_fire || (io.redirect_valid && io.redirect_isCall))

    val s3_missPushEntry = Wire(new RASEntry)
    val s3_missPushAddr = Wire(new RASPtr)
    val s3_missPushNos = Wire(new RASPtr)

    s3_missPushEntry.retAddr := io.s3_pushAddr
    s3_missPushEntry.ctr := Mux(s3TopEntry.retAddr === io.s3_pushAddr && s3TopEntry.ctr < ctrMax, io.s3_meta.sctr + 1.U, 0.U)
    s3_missPushAddr := io.s3_meta.TOSW
    s3_missPushNos := io.s3_meta.TOSR

    realWriteEntry := Mux(io.redirect_isCall, realWriteEntry_next,
      Mux(io.s3_missed_push, s3_missPushEntry,
        realWriteEntry_next))

    val realWriteAddr_next = RegEnable(Mux(io.redirect_valid && io.redirect_isCall, io.redirect_meta_TOSW, TOSW),
      io.s2_fire || (io.redirect_valid && io.redirect_isCall))
    val realWriteAddr = Mux(io.redirect_isCall, realWriteAddr_next,
      Mux(io.s3_missed_push, s3_missPushAddr,
        realWriteAddr_next))
    val realNos_next = RegEnable(Mux(io.redirect_valid && io.redirect_isCall, io.redirect_meta_TOSR, TOSR),
      io.s2_fire || (io.redirect_valid && io.redirect_isCall))
    val realNos = Mux(io.redirect_isCall, realNos_next,
      Mux(io.s3_missed_push, s3_missPushNos,
        realNos_next))

    realPush := (io.s3_fire && (!io.s3_cancel && RegEnable(io.spec_push_valid, io.s2_fire) || io.s3_missed_push)) ||
      RegNext(io.redirect_valid && io.redirect_isCall)

    when (realPush) {
      spec_queue(realWriteAddr.value) := realWriteEntry
      spec_nos(realWriteAddr.value) := realNos
    }

    def specPush(retAddr: UInt, currentSsp: UInt, currentSctr: UInt, currentTOSR: RASPtr, currentTOSW: RASPtr, topEntry: RASEntry) = {
      TOSR := currentTOSW
      TOSW := specPtrInc(currentTOSW)
      // spec sp and ctr should always be maintained
      when (topEntry.retAddr === retAddr && currentSctr < ctrMax) {
        sctr := currentSctr + 1.U
      } .otherwise {
        ssp := ptrInc(currentSsp)
        sctr := 0.U
      }
      // if we are exhausting the capacity of the spec queue, force BOS forward
      when (specPtrInc(currentTOSW) === BOS) {
        BOS := specPtrInc(BOS)
        spec_overflowed := true.B
      }
    }
    XSPerfAccumulate("spec_overflowed", TOSW.value === BOS.value)

    when (io.spec_push_valid) {
      specPush(io.spec_push_addr, ssp, sctr, TOSR, TOSW, topEntry)
    }

    def specPop(currentSsp: UInt, currentSctr: UInt, currentTOSR: RASPtr, currentTOSW: RASPtr, currentTopNos: RASPtr) = {
      // TOSR is only maintained when the spec queue is not empty
      when (TOSRinRange(currentTOSR, currentTOSW)) {
        TOSR := currentTopNos
      }
      // spec sp and ctr should always be maintained
      when (currentSctr > 0.U) {
        sctr := currentSctr - 1.U
      } .elsewhen (TOSRinRange(currentTopNos, currentTOSW)) {
        // in range, use inflight data
        ssp := ptrDec(currentSsp)
        sctr := spec_queue(currentTopNos.value).ctr
      } .otherwise {
        // NOS not in range, use commit data
        ssp := ptrDec(currentSsp)
        sctr := getCommitTop(ptrDec(currentSsp)).ctr
        // in the overflow state we cannot determine the next sctr, so the value here is not accurate
      }
    }
    when (io.spec_pop_valid) {
      specPop(ssp, sctr, TOSR, TOSW, topNos)
    }

    // io.spec_pop_addr := Mux(writeBypassValid, writeBypassEntry.retAddr, topEntry.retAddr)
    io.spec_pop_addr := timingTop.retAddr

    io.BOS := BOS
    io.TOSW := TOSW
    io.TOSR := TOSR
    io.NOS := topNos
    io.ssp := ssp
    io.sctr := sctr
    io.nsp := nsp

    when (io.s3_cancel) {
      // recover all related pointers from the s3 meta
      TOSR := io.s3_meta.TOSR
      TOSW := io.s3_meta.TOSW
      ssp := io.s3_meta.ssp
      sctr := io.s3_meta.sctr

      // for a missed pop, we also need to perform the pop here
      when (io.s3_missed_pop) {
        specPop(io.s3_meta.ssp, io.s3_meta.sctr, io.s3_meta.TOSR, io.s3_meta.TOSW, io.s3_meta.NOS)
      }
      when (io.s3_missed_push) {
        // do not use any bypass from f2
        specPush(io.s3_pushAddr, io.s3_meta.ssp, io.s3_meta.sctr, io.s3_meta.TOSR, io.s3_meta.TOSW, s3TopEntry)
      }
    }

    val commitTop = commit_stack(nsp)

    when (io.commit_pop_valid) {
      val nsp_update = Wire(UInt(log2Up(rasSize).W))
      when (io.commit_meta_ssp =/= nsp) {
        // force nsp to the committed ssp to avoid permanent divergence
        nsp_update := io.commit_meta_ssp
      } .otherwise {
        nsp_update := nsp
      }

      // if ctr > 0, decrement ctr in the stack, otherwise decrement nsp
      when (commitTop.ctr > 0.U) {
        commit_stack(nsp_update).ctr := commitTop.ctr - 1.U
        nsp := nsp_update
      } .otherwise {
        nsp := ptrDec(nsp_update)
      }
      // XSError(io.commit_meta_ssp =/= nsp, "nsp mismatch with expected ssp")
    }

    val commit_push_addr = spec_queue(io.commit_meta_TOSW.value).retAddr
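    // On a committed call, the pushed address is re-read from the spec queue
    // slot that the prediction recorded in its meta (commit_meta_TOSW) instead
    // of being carried down the pipeline; the commented-out XSError below
    // cross-checks it against the address recomputed at commit.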
    when (io.commit_push_valid) {
      val nsp_update = Wire(UInt(log2Up(rasSize).W))
      when (io.commit_meta_ssp =/= nsp) {
        // force nsp to the committed ssp to avoid permanent divergence
        nsp_update := io.commit_meta_ssp
      } .otherwise {
        nsp_update := nsp
      }
      // if ctr < max && topAddr == push addr, increment ctr, otherwise increment nsp
      when (commitTop.ctr < ctrMax && commitTop.retAddr === commit_push_addr) {
        commit_stack(nsp_update).ctr := commitTop.ctr + 1.U
        nsp := nsp_update
      } .otherwise {
        nsp := ptrInc(nsp_update)
        commit_stack(ptrInc(nsp_update)).retAddr := commit_push_addr
        commit_stack(ptrInc(nsp_update)).ctr := 0.U
      }
      // on overflow, BOS may have been forced forward; do not revert those changes
      when (!spec_overflowed || isAfter(io.commit_meta_TOSW, BOS)) {
        BOS := io.commit_meta_TOSW
        spec_overflowed := false.B
      }

      // XSError(io.commit_meta_ssp =/= nsp, "nsp mismatch with expected ssp")
      // XSError(io.commit_push_addr =/= commit_push_addr, "addr from commit mismatch with addr from spec")
    }

    when (io.redirect_valid) {
      TOSR := io.redirect_meta_TOSR
      TOSW := io.redirect_meta_TOSW
      ssp := io.redirect_meta_ssp
      sctr := io.redirect_meta_sctr

      when (io.redirect_isCall) {
        specPush(io.redirect_callAddr, io.redirect_meta_ssp, io.redirect_meta_sctr, io.redirect_meta_TOSR, io.redirect_meta_TOSW, redirectTopEntry)
      }
      when (io.redirect_isRet) {
        specPop(io.redirect_meta_ssp, io.redirect_meta_sctr, io.redirect_meta_TOSR, io.redirect_meta_TOSW, redirectTopNos)
      }
    }

    io.debug.commit_stack.zipWithIndex.foreach { case (a, i) => a := commit_stack(i) }
    io.debug.spec_nos.zipWithIndex.foreach { case (a, i) => a := spec_nos(i) }
    io.debug.spec_queue.zipWithIndex.foreach { case (a, i) => a := spec_queue(i) }
  }
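  // Note on update ordering inside RASStack: the redirect recovery block is
  // written after the speculative push/pop and s3_cancel blocks, so by
  // Chisel's last-connect semantics it overrides them when a redirect and a
  // speculative update coincide in the same cycle. Commit updates touch a
  // disjoint set of registers (commit_stack, nsp, BOS) and are independent.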
  val stack = Module(new RASStack(RasSize, RasSpecSize)).io

  val s2_spec_push = WireInit(false.B)
  val s2_spec_pop = WireInit(false.B)
  val s2_full_pred = io.in.bits.resp_in(0).s2.full_pred(2)
  // when the last instruction is an RVI call, the fall-through address points into the middle of it, so 2 must be added
  val s2_spec_new_addr = s2_full_pred.fallThroughAddr + Mux(s2_full_pred.last_may_be_rvi_call, 2.U, 0.U)
  stack.spec_push_valid := s2_spec_push
  stack.spec_pop_valid := s2_spec_pop
  stack.spec_push_addr := s2_spec_new_addr

  // confirm that the call/ret is the taken cfi
  s2_spec_push := io.s2_fire(2) && s2_full_pred.hit_taken_on_call && !io.s3_redirect(2)
  s2_spec_pop := io.s2_fire(2) && s2_full_pred.hit_taken_on_ret && !io.s3_redirect(2)

  // val s2_jalr_target = io.out.s2.full_pred.jalr_target
  // val s2_last_target_in = s2_full_pred.targets.last
  // val s2_last_target_out = io.out.s2.full_pred(2).targets.last
  val s2_is_jalr = s2_full_pred.is_jalr
  val s2_is_ret = s2_full_pred.is_ret
  val s2_top = stack.spec_pop_addr
  // assert(is_jalr && is_ret || !is_ret)
  when (s2_is_ret && io.ctrl.ras_enable) {
    io.out.s2.full_pred.map(_.jalr_target).foreach(_ := s2_top)
    // FIXME: should use s1 globally
  }
  // s2_last_target_out := Mux(s2_is_jalr, s2_jalr_target, s2_last_target_in)
  io.out.s2.full_pred.zipWithIndex.foreach { case (a, i) =>
    a.targets.last := Mux(s2_is_jalr, io.out.s2.full_pred(i).jalr_target, io.in.bits.resp_in(0).s2.full_pred(i).targets.last)
  }

  val s2_meta = Wire(new RASInternalMeta)
  s2_meta.ssp := stack.ssp
  s2_meta.sctr := stack.sctr
  s2_meta.TOSR := stack.TOSR
  s2_meta.TOSW := stack.TOSW
  s2_meta.NOS := stack.NOS

  val s3_top = RegEnable(stack.spec_pop_addr, io.s2_fire(2))
  val s3_spec_new_addr = RegEnable(s2_spec_new_addr, io.s2_fire(2))

  // val s3_jalr_target = io.out.s3.full_pred.jalr_target
  // val s3_last_target_in = io.in.bits.resp_in(0).s3.full_pred(2).targets.last
  // val s3_last_target_out = io.out.s3.full_pred(2).targets.last
  val s3_is_jalr = io.in.bits.resp_in(0).s3.full_pred(2).is_jalr
  val s3_is_ret = io.in.bits.resp_in(0).s3.full_pred(2).is_ret
  // assert(is_jalr && is_ret || !is_ret)
  when (s3_is_ret && io.ctrl.ras_enable) {
    io.out.s3.full_pred.map(_.jalr_target).foreach(_ := s3_top)
    // FIXME: should use s1 globally
  }
  // s3_last_target_out := Mux(s3_is_jalr, s3_jalr_target, s3_last_target_in)
  io.out.s3.full_pred.zipWithIndex.foreach { case (a, i) =>
    a.targets.last := Mux(s3_is_jalr, io.out.s3.full_pred(i).jalr_target, io.in.bits.resp_in(0).s3.full_pred(i).targets.last)
  }

  val s3_pushed_in_s2 = RegEnable(s2_spec_push, io.s2_fire(2))
  val s3_popped_in_s2 = RegEnable(s2_spec_pop, io.s2_fire(2))
  val s3_push = io.in.bits.resp_in(0).s3.full_pred(2).hit_taken_on_call
  val s3_pop = io.in.bits.resp_in(0).s3.full_pred(2).hit_taken_on_ret

  val s3_cancel = io.s3_fire(2) && (s3_pushed_in_s2 =/= s3_push || s3_popped_in_s2 =/= s3_pop)
  stack.s2_fire := io.s2_fire(2)
  stack.s3_fire := io.s3_fire(2)

  stack.s3_cancel := s3_cancel

  val s3_meta = RegEnable(s2_meta, io.s2_fire(2))

  stack.s3_meta := s3_meta
  stack.s3_missed_pop := s3_pop && !s3_popped_in_s2
  stack.s3_missed_push := s3_push && !s3_pushed_in_s2
  stack.s3_pushAddr := s3_spec_new_addr

  // the top entry is no longer needed here, only TOSR, TOSW, ssp and sctr are
  // TODO: remove related signals

  val last_stage_meta = Wire(new RASMeta)
  last_stage_meta.ssp := s3_meta.ssp
  last_stage_meta.TOSW := s3_meta.TOSW

  io.out.last_stage_spec_info.sctr := s3_meta.sctr
  io.out.last_stage_spec_info.ssp := s3_meta.ssp
  io.out.last_stage_spec_info.TOSW := s3_meta.TOSW
  io.out.last_stage_spec_info.TOSR := s3_meta.TOSR
  io.out.last_stage_spec_info.NOS := s3_meta.NOS
  io.out.last_stage_spec_info.topAddr := s3_top
  io.out.last_stage_meta := last_stage_meta.asUInt

  val redirect = RegNextWithEnable(io.redirect)
  val do_recover = redirect.valid
  val recover_cfi = redirect.bits.cfiUpdate

  val retMissPred = do_recover && redirect.bits.level === 0.U && recover_cfi.pd.isRet
  val callMissPred = do_recover && redirect.bits.level === 0.U && recover_cfi.pd.isCall
  // when we mispredict a call, we must redo the push operation
  // similarly, when we mispredict a return, we must redo the pop
  stack.redirect_valid := do_recover
  stack.redirect_isCall := callMissPred
  stack.redirect_isRet := retMissPred
  stack.redirect_meta_ssp := recover_cfi.ssp
  stack.redirect_meta_sctr := recover_cfi.sctr
  stack.redirect_meta_TOSW := recover_cfi.TOSW
  stack.redirect_meta_TOSR := recover_cfi.TOSR
  stack.redirect_meta_NOS := recover_cfi.NOS
  stack.redirect_callAddr := recover_cfi.pc + Mux(recover_cfi.pd.isRVC, 2.U, 4.U)
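  // The return address re-pushed on a mispredicted call is reconstructed from
  // the call's own pc: +2 if the call is a compressed (RVC) instruction,
  // +4 otherwise, i.e. the address of the next sequential instruction.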
  val update = io.update.bits
  val updateMeta = io.update.bits.meta.asTypeOf(new RASMeta)
  val updateValid = io.update.valid

  stack.commit_push_valid := updateValid && update.is_call_taken
  stack.commit_pop_valid := updateValid && update.is_ret_taken
  stack.commit_push_addr := update.ftb_entry.getFallThrough(update.pc) + Mux(update.ftb_entry.last_may_be_rvi_call, 2.U, 0.U)
  stack.commit_meta_TOSW := updateMeta.TOSW
  stack.commit_meta_ssp := updateMeta.ssp

  XSPerfAccumulate("ras_s3_cancel", s3_cancel)
  XSPerfAccumulate("ras_redirect_recover", redirect.valid)
  XSPerfAccumulate("ras_s3_and_redirect_recover_at_the_same_time", s3_cancel && redirect.valid)

  val spec_debug = stack.debug
  XSDebug(io.s2_fire(2), "----------------RAS----------------\n")
  XSDebug(io.s2_fire(2), " TopRegister: 0x%x\n", stack.spec_pop_addr)
  XSDebug(io.s2_fire(2), "  index       addr           ctr           nos (spec part)\n")
  for (i <- 0 until RasSpecSize) {
    XSDebug(io.s2_fire(2), "  (%d)   0x%x      %d        %d", i.U, spec_debug.spec_queue(i).retAddr, spec_debug.spec_queue(i).ctr, spec_debug.spec_nos(i).value)
    when (i.U === stack.TOSW.value) { XSDebug(io.s2_fire(2), "   <----TOSW") }
    when (i.U === stack.TOSR.value) { XSDebug(io.s2_fire(2), "   <----TOSR") }
    when (i.U === stack.BOS.value)  { XSDebug(io.s2_fire(2), "   <----BOS") }
    XSDebug(io.s2_fire(2), "\n")
  }
  XSDebug(io.s2_fire(2), "  index       addr           ctr   (committed part)\n")
  for (i <- 0 until RasSize) {
    XSDebug(io.s2_fire(2), "  (%d)   0x%x      %d", i.U, spec_debug.commit_stack(i).retAddr, spec_debug.commit_stack(i).ctr)
    when (i.U === stack.ssp) { XSDebug(io.s2_fire(2), "   <----ssp") }
    when (i.U === stack.nsp) { XSDebug(io.s2_fire(2), "   <----nsp") }
    XSDebug(io.s2_fire(2), "\n")
  }
  /*
  XSDebug(s2_spec_push, "s2_spec_push  inAddr: 0x%x  inCtr: %d |  allocNewEntry:%d |  sp:%d \n",
    s2_spec_new_addr, spec_debug.spec_push_entry.ctr, spec_debug.spec_alloc_new, spec_debug.sp.asUInt)
  XSDebug(s2_spec_pop, "s2_spec_pop  outAddr: 0x%x \n", io.out.s2.getTarget)
  val s3_recover_entry = spec_debug.recover_push_entry
  XSDebug(s3_recover && s3_push, "s3_recover_push  inAddr: 0x%x  inCtr: %d |  allocNewEntry:%d |  sp:%d \n",
    s3_recover_entry.retAddr, s3_recover_entry.ctr, spec_debug.recover_alloc_new, s3_sp.asUInt)
  XSDebug(s3_recover && s3_pop, "s3_recover_pop  outAddr: 0x%x \n", io.out.s3.getTarget)
  val redirectUpdate = redirect.bits.cfiUpdate
  XSDebug(do_recover && callMissPred, "redirect_recover_push\n")
  XSDebug(do_recover && retMissPred, "redirect_recover_pop\n")
  XSDebug(do_recover, "redirect_recover(SP:%d retAddr:%x ctr:%d) \n",
    redirectUpdate.rasSp, redirectUpdate.rasEntry.retAddr, redirectUpdate.rasEntry.ctr)
  */

  generatePerfEvent()
}