1/*************************************************************************************** 2* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences 3* Copyright (c) 2020-2021 Peng Cheng Laboratory 4* 5* XiangShan is licensed under Mulan PSL v2. 6* You can use this software according to the terms and conditions of the Mulan PSL v2. 7* You may obtain a copy of Mulan PSL v2 at: 8* http://license.coscl.org.cn/MulanPSL2 9* 10* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, 11* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, 12* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. 13* 14* See the Mulan PSL v2 for more details. 15***************************************************************************************/ 16package xiangshan.frontend 17 18import org.chipsalliance.cde.config.Parameters 19import chisel3._ 20import chisel3.util._ 21import utils._ 22import utility._ 23import xiangshan._ 24import xiangshan.frontend._ 25 26class RASEntry()(implicit p: Parameters) extends XSBundle { 27 val retAddr = UInt(VAddrBits.W) 28 val ctr = UInt(8.W) // layer of nested call functions 29 def =/=(that: RASEntry) = this.retAddr =/= that.retAddr || this.ctr =/= that.ctr 30} 31 32class RASPtr(implicit p: Parameters) extends CircularQueuePtr[RASPtr]( 33 p => p(XSCoreParamsKey).RasSpecSize 34){ 35} 36 37object RASPtr { 38 def apply(f: Bool, v: UInt)(implicit p: Parameters): RASPtr = { 39 val ptr = Wire(new RASPtr) 40 ptr.flag := f 41 ptr.value := v 42 ptr 43 } 44 def inverse(ptr: RASPtr)(implicit p: Parameters): RASPtr = { 45 apply(!ptr.flag, ptr.value) 46 } 47} 48 49class RASMeta(implicit p: Parameters) extends XSBundle { 50 val ssp = UInt(log2Up(RasSize).W) 51 val sctr = UInt(RasCtrSize.W) 52 val TOSW = new RASPtr 53 val TOSR = new RASPtr 54 val NOS = new RASPtr 55} 56 57object RASMeta { 58 def apply(ssp: UInt, sctr: UInt, TOSW: RASPtr, TOSR: RASPtr, NOS: RASPtr)(implicit p: Parameters):RASMeta = { 59 val e = Wire(new RASMeta) 60 e.ssp := ssp 61 e.sctr := sctr 62 e.TOSW := TOSW 63 e.TOSR := TOSR 64 e.NOS := NOS 65 e 66 } 67} 68 69class RASDebug(implicit p: Parameters) extends XSBundle { 70 val spec_queue = Output(Vec(RasSpecSize, new RASEntry)) 71 val spec_nos = Output(Vec(RasSpecSize, new RASPtr)) 72 val commit_stack = Output(Vec(RasSize, new RASEntry)) 73} 74 75class RAS(implicit p: Parameters) extends BasePredictor { 76 override val meta_size = WireInit(0.U.asTypeOf(new RASMeta)).getWidth 77 78 object RASEntry { 79 def apply(retAddr: UInt, ctr: UInt): RASEntry = { 80 val e = Wire(new RASEntry) 81 e.retAddr := retAddr 82 e.ctr := ctr 83 e 84 } 85 } 86 87 88 class RASStack(rasSize: Int, rasSpecSize: Int) extends XSModule with HasCircularQueuePtrHelper { 89 val io = IO(new Bundle { 90 val spec_push_valid = Input(Bool()) 91 val spec_pop_valid = Input(Bool()) 92 val spec_push_addr = Input(UInt(VAddrBits.W)) 93 // for write bypass between s2 and s3 94 95 val s2_fire = Input(Bool()) 96 val s3_fire = Input(Bool()) 97 val s3_cancel = Input(Bool()) 98 val s3_meta = Input(new RASMeta) 99 val s3_missed_pop = Input(Bool()) 100 val s3_missed_push = Input(Bool()) 101 val s3_pushAddr = Input(UInt(VAddrBits.W)) 102 val spec_pop_addr = Output(UInt(VAddrBits.W)) 103 104 val commit_push_valid = Input(Bool()) 105 val commit_pop_valid = Input(Bool()) 106 val commit_push_addr = Input(UInt(VAddrBits.W)) 107 val commit_meta_TOSW = Input(new RASPtr) 108 val commit_meta_TOSR = Input(new RASPtr) 109 // for debug purpose only 110 val commit_meta_ssp = Input(UInt(log2Up(RasSize).W)) 111 val commit_meta_sctr = Input(UInt(RasCtrSize.W)) 112 113 val redirect_valid = Input(Bool()) 114 val redirect_isCall = Input(Bool()) 115 val redirect_isRet = Input(Bool()) 116 val redirect_meta_ssp = Input(UInt(log2Up(RasSize).W)) 117 val redirect_meta_sctr = Input(UInt(RasCtrSize.W)) 118 val redirect_meta_TOSW = Input(new RASPtr) 119 val redirect_meta_TOSR = Input(new RASPtr) 120 val redirect_meta_NOS = Input(new RASPtr) 121 val redirect_callAddr = Input(UInt(VAddrBits.W)) 122 123 val ssp = Output(UInt(log2Up(RasSize).W)) 124 val sctr = Output(UInt(RasCtrSize.W)) 125 val nsp = Output(UInt(log2Up(RasSize).W)) 126 val TOSR = Output(new RASPtr) 127 val TOSW = Output(new RASPtr) 128 val NOS = Output(new RASPtr) 129 val BOS = Output(new RASPtr) 130 131 val debug = new RASDebug 132 }) 133 134 val commit_stack = RegInit(VecInit(Seq.fill(RasSize)(RASEntry(0.U, 0.U)))) 135 val spec_queue = RegInit(VecInit(Seq.fill(rasSpecSize)(RASEntry(0.U, 0.U)))) 136 val spec_nos = RegInit(VecInit(Seq.fill(rasSpecSize)(RASPtr(false.B, 0.U)))) 137 138 val nsp = RegInit(0.U(log2Up(rasSize).W)) 139 val ssp = RegInit(0.U(log2Up(rasSize).W)) 140 141 val sctr = RegInit(0.U(RasCtrSize.W)) 142 val TOSR = RegInit(RASPtr(true.B, (RasSpecSize - 1).U)) 143 val TOSW = RegInit(RASPtr(false.B, 0.U)) 144 val BOS = RegInit(RASPtr(false.B, 0.U)) 145 146 val spec_overflowed = RegInit(false.B) 147 148 val writeBypassEntry = Reg(new RASEntry) 149 val writeBypassNos = Reg(new RASPtr) 150 151 val writeBypassValid = RegInit(0.B) 152 val writeBypassValidWire = Wire(Bool()) 153 154 def TOSRinRange(currentTOSR: RASPtr, currentTOSW: RASPtr) = { 155 val inflightValid = WireInit(false.B) 156 // if in range, TOSR should be no younger than BOS and strictly younger than TOSW 157 when (!isBefore(currentTOSR, BOS) && isBefore(currentTOSR, currentTOSW)) { 158 inflightValid := true.B 159 } 160 inflightValid 161 } 162 163 def getCommitTop(currentSsp: UInt) = { 164 commit_stack(currentSsp) 165 } 166 167 def getTopNos(currentTOSR: RASPtr, allowBypass: Boolean):RASPtr = { 168 val ret = Wire(new RASPtr) 169 if (allowBypass){ 170 when (writeBypassValid) { 171 ret := writeBypassNos 172 } .otherwise { 173 ret := spec_nos(TOSR.value) 174 } 175 } else { 176 ret := spec_nos(TOSR.value) // invalid when TOSR is not in range 177 } 178 ret 179 } 180 181 def getTop(currentSsp: UInt, currentSctr: UInt, currentTOSR: RASPtr, currentTOSW: RASPtr, allowBypass: Boolean):RASEntry = { 182 val ret = Wire(new RASEntry) 183 if (allowBypass) { 184 when (writeBypassValid) { 185 ret := writeBypassEntry 186 } .elsewhen (TOSRinRange(currentTOSR, currentTOSW)) { 187 ret := spec_queue(currentTOSR.value) 188 } .otherwise { 189 ret := getCommitTop(currentSsp) 190 } 191 } else { 192 when (TOSRinRange(currentTOSR, currentTOSW)) { 193 ret := spec_queue(currentTOSR.value) 194 } .otherwise { 195 ret := getCommitTop(currentSsp) 196 } 197 } 198 199 ret 200 } 201 202 // it would be unsafe for specPtr manipulation if specSize is not power of 2 203 assert(log2Up(RasSpecSize) == log2Floor(RasSpecSize)) 204 def ctrMax = ((1l << RasCtrSize) - 1).U 205 def ptrInc(ptr: UInt) = ptr + 1.U 206 def ptrDec(ptr: UInt) = ptr - 1.U 207 208 def specPtrInc(ptr: RASPtr) = ptr + 1.U 209 def specPtrDec(ptr: RASPtr) = ptr - 1.U 210 211 212 213 214 215 216 when (io.redirect_valid && io.redirect_isCall) { 217 writeBypassValidWire := true.B 218 writeBypassValid := true.B 219 } .elsewhen (io.redirect_valid) { 220 // clear current top writeBypass if doing redirect 221 writeBypassValidWire := false.B 222 writeBypassValid := false.B 223 } .elsewhen (io.s2_fire) { 224 writeBypassValidWire := io.spec_push_valid 225 writeBypassValid := io.spec_push_valid 226 } .elsewhen (io.s3_fire) { 227 writeBypassValidWire := false.B 228 writeBypassValid := false.B 229 } .otherwise { 230 writeBypassValidWire := writeBypassValid 231 } 232 233 234 235 val topEntry = getTop(ssp, sctr, TOSR, TOSW, true) 236 val topNos = getTopNos(TOSR, true) 237 val redirectTopEntry = getTop(io.redirect_meta_ssp, io.redirect_meta_sctr, io.redirect_meta_TOSR, io.redirect_meta_TOSW, false) 238 val redirectTopNos = io.redirect_meta_NOS 239 val s3TopEntry = getTop(io.s3_meta.ssp, io.s3_meta.sctr, io.s3_meta.TOSR, io.s3_meta.TOSW, false) 240 val s3TopNos = io.s3_meta.NOS 241 242 val writeEntry = Wire(new RASEntry) 243 val writeNos = Wire(new RASPtr) 244 writeEntry.retAddr := Mux(io.redirect_valid && io.redirect_isCall, io.redirect_callAddr, io.spec_push_addr) 245 writeEntry.ctr := Mux(io.redirect_valid && io.redirect_isCall, 246 Mux(redirectTopEntry.retAddr === io.redirect_callAddr && redirectTopEntry.ctr < ctrMax, io.redirect_meta_sctr + 1.U, 0.U), 247 Mux(topEntry.retAddr === io.spec_push_addr && topEntry.ctr < ctrMax, sctr + 1.U, 0.U)) 248 249 writeNos := Mux(io.redirect_valid && io.redirect_isCall, 250 io.redirect_meta_NOS, TOSR) 251 252 when (io.spec_push_valid || (io.redirect_valid && io.redirect_isCall)) { 253 writeBypassEntry := writeEntry 254 writeBypassNos := writeNos 255 } 256 257 val realPush = Wire(Bool()) 258 val realWriteEntry = Wire(new RASEntry) 259 val timingTop = RegInit(0.U.asTypeOf(new RASEntry)) 260 val timingNos = RegInit(0.U.asTypeOf(new RASPtr)) 261 262 when (writeBypassValidWire) { 263 when ((io.redirect_valid && io.redirect_isCall) || io.spec_push_valid) { 264 timingTop := writeEntry 265 timingNos := writeNos 266 } .otherwise { 267 timingTop := writeBypassEntry 268 timingNos := writeBypassNos 269 } 270 271 } .elsewhen (io.redirect_valid && io.redirect_isRet) { 272 // getTop using redirect Nos as TOSR 273 val popRedSsp = Wire(UInt(log2Up(rasSize).W)) 274 val popRedSctr = Wire(UInt(RasCtrSize.W)) 275 val popRedTOSR = io.redirect_meta_NOS 276 val popRedTOSW = io.redirect_meta_TOSW 277 278 when (io.redirect_meta_sctr > 0.U) { 279 popRedSctr := io.redirect_meta_sctr - 1.U 280 popRedSsp := io.redirect_meta_ssp 281 } .elsewhen (TOSRinRange(popRedTOSR, TOSW)) { 282 popRedSsp := ptrDec(io.redirect_meta_ssp) 283 popRedSctr := spec_queue(popRedTOSR.value).ctr 284 } .otherwise { 285 popRedSsp := ptrDec(io.redirect_meta_ssp) 286 popRedSctr := getCommitTop(ptrDec(io.redirect_meta_ssp)).ctr 287 } 288 // We are deciding top for the next cycle, no need to use bypass here 289 timingTop := getTop(popRedSsp, popRedSctr, popRedTOSR, popRedTOSW, false) 290 } .elsewhen (io.redirect_valid) { 291 // Neither call nor ret 292 val popSsp = io.redirect_meta_ssp 293 val popSctr = io.redirect_meta_sctr 294 val popTOSR = io.redirect_meta_TOSR 295 val popTOSW = io.redirect_meta_TOSW 296 297 timingTop := getTop(popSsp, popSctr, popTOSR, popTOSW, false) 298 299 } .elsewhen (io.spec_pop_valid) { 300 // getTop using current Nos as TOSR 301 val popSsp = Wire(UInt(log2Up(rasSize).W)) 302 val popSctr = Wire(UInt(RasCtrSize.W)) 303 val popTOSR = topNos 304 val popTOSW = TOSW 305 306 when (sctr > 0.U) { 307 popSctr := sctr - 1.U 308 popSsp := ssp 309 } .elsewhen (TOSRinRange(popTOSR, TOSW)) { 310 popSsp := ptrDec(ssp) 311 popSctr := spec_queue(popTOSR.value).ctr 312 } .otherwise { 313 popSsp := ptrDec(ssp) 314 popSctr := getCommitTop(ptrDec(ssp)).ctr 315 } 316 // We are deciding top for the next cycle, no need to use bypass here 317 timingTop := getTop(popSsp, popSctr, popTOSR, popTOSW, false) 318 } .elsewhen (realPush) { 319 // just updating spec queue, cannot read from there 320 timingTop := realWriteEntry 321 } .elsewhen (io.s3_cancel) { 322 // s3 is different with s2 323 timingTop := getTop(io.s3_meta.ssp, io.s3_meta.sctr, io.s3_meta.TOSR, io.s3_meta.TOSW, false) 324 when (io.s3_missed_push) { 325 val writeEntry_s3 = Wire(new RASEntry) 326 timingTop := writeEntry_s3 327 writeEntry_s3.retAddr := io.s3_pushAddr 328 writeEntry_s3.ctr := Mux(timingTop.retAddr === io.s3_pushAddr && io.s3_meta.sctr < ctrMax, io.s3_meta.sctr + 1.U, 0.U) 329 } .elsewhen (io.s3_missed_pop) { 330 val popRedSsp_s3 = Wire(UInt(log2Up(rasSize).W)) 331 val popRedSctr_s3 = Wire(UInt(RasCtrSize.W)) 332 val popRedTOSR_s3 = io.s3_meta.NOS 333 val popRedTOSW_s3 = io.s3_meta.TOSW 334 335 when (io.s3_meta.sctr > 0.U) { 336 popRedSctr_s3 := io.s3_meta.sctr - 1.U 337 popRedSsp_s3 := io.s3_meta.ssp 338 } .elsewhen (TOSRinRange(popRedTOSR_s3, popRedTOSW_s3)) { 339 popRedSsp_s3 := ptrDec(io.s3_meta.ssp) 340 popRedSctr_s3 := spec_queue(popRedTOSR_s3.value).ctr 341 } .otherwise { 342 popRedSsp_s3 := ptrDec(io.s3_meta.ssp) 343 popRedSctr_s3 := getCommitTop(ptrDec(io.s3_meta.ssp)).ctr 344 } 345 // We are deciding top for the next cycle, no need to use bypass here 346 timingTop := getTop(popRedSsp_s3, popRedSctr_s3, popRedTOSR_s3, popRedTOSW_s3, false) 347 } 348 } .otherwise { 349 // easy case 350 val popSsp = ssp 351 val popSctr = sctr 352 val popTOSR = TOSR 353 val popTOSW = TOSW 354 timingTop := getTop(popSsp, popSctr, popTOSR, popTOSW, false) 355 } 356 val diffTop = Mux(writeBypassValid, writeBypassEntry.retAddr, topEntry.retAddr) 357 358 XSPerfAccumulate("ras_top_mismatch", diffTop =/= timingTop.retAddr); 359 // could diff when more pop than push and a commit stack is updated with inflight info 360 361 val realWriteEntry_next = RegEnable(writeEntry, io.s2_fire || io.redirect_isCall) 362 val s3_missPushEntry = Wire(new RASEntry) 363 val s3_missPushAddr = Wire(new RASPtr) 364 val s3_missPushNos = Wire(new RASPtr) 365 366 s3_missPushEntry.retAddr := io.s3_pushAddr 367 s3_missPushEntry.ctr := Mux(s3TopEntry.retAddr === io.s3_pushAddr && s3TopEntry.ctr < ctrMax, io.s3_meta.sctr + 1.U, 0.U) 368 s3_missPushAddr := io.s3_meta.TOSW 369 s3_missPushNos := io.s3_meta.TOSR 370 371 372 373 realWriteEntry := Mux(io.redirect_isCall, realWriteEntry_next, 374 Mux(io.s3_missed_push, s3_missPushEntry, 375 realWriteEntry_next)) 376 377 val realWriteAddr_next = RegEnable(Mux(io.redirect_valid && io.redirect_isCall, io.redirect_meta_TOSW, TOSW), io.s2_fire || (io.redirect_valid && io.redirect_isCall)) 378 val realWriteAddr = Mux(io.redirect_isCall, realWriteAddr_next, 379 Mux(io.s3_missed_push, s3_missPushAddr, 380 realWriteAddr_next)) 381 val realNos_next = RegEnable(Mux(io.redirect_valid && io.redirect_isCall, io.redirect_meta_TOSR, TOSR), io.s2_fire || (io.redirect_valid && io.redirect_isCall)) 382 val realNos = Mux(io.redirect_isCall, realNos_next, 383 Mux(io.s3_missed_push, s3_missPushNos, 384 realNos_next)) 385 386 realPush := (io.s3_fire && (!io.s3_cancel && RegEnable(io.spec_push_valid, io.s2_fire) || io.s3_missed_push)) || RegNext(io.redirect_valid && io.redirect_isCall) 387 388 when (realPush) { 389 spec_queue(realWriteAddr.value) := realWriteEntry 390 spec_nos(realWriteAddr.value) := realNos 391 } 392 393 def specPush(retAddr: UInt, currentSsp: UInt, currentSctr: UInt, currentTOSR: RASPtr, currentTOSW: RASPtr, topEntry: RASEntry) = { 394 TOSR := currentTOSW 395 TOSW := specPtrInc(currentTOSW) 396 // spec sp and ctr should always be maintained 397 when (topEntry.retAddr === retAddr && currentSctr < ctrMax) { 398 sctr := currentSctr + 1.U 399 } .otherwise { 400 ssp := ptrInc(currentSsp) 401 sctr := 0.U 402 } 403 // if we are draining the capacity of spec queue, force move BOS forward 404 when (specPtrInc(currentTOSW) === BOS) { 405 BOS := specPtrInc(BOS) 406 spec_overflowed := true.B; 407 } 408 } 409 410 when (io.spec_push_valid) { 411 specPush(io.spec_push_addr, ssp, sctr, TOSR, TOSW, topEntry) 412 } 413 def specPop(currentSsp: UInt, currentSctr: UInt, currentTOSR: RASPtr, currentTOSW: RASPtr, currentTopNos: RASPtr) = { 414 // TOSR is only maintained when spec queue is not empty 415 when (TOSRinRange(currentTOSR, currentTOSW)) { 416 TOSR := currentTopNos 417 } 418 // spec sp and ctr should always be maintained 419 when (currentSctr > 0.U) { 420 sctr := currentSctr - 1.U 421 } .elsewhen (TOSRinRange(currentTopNos, currentTOSW)) { 422 // in range, use inflight data 423 ssp := ptrDec(currentSsp) 424 sctr := spec_queue(currentTopNos.value).ctr 425 } .otherwise { 426 // NOS not in range, use commit data 427 ssp := ptrDec(currentSsp) 428 sctr := getCommitTop(ptrDec(currentSsp)).ctr 429 // in overflow state, we cannot determine the next sctr, sctr here is not accurate 430 } 431 } 432 when (io.spec_pop_valid) { 433 specPop(ssp, sctr, TOSR, TOSW, topNos) 434 } 435 436 // io.spec_pop_addr := Mux(writeBypassValid, writeBypassEntry.retAddr, topEntry.retAddr) 437 438 io.spec_pop_addr := timingTop.retAddr 439 io.BOS := BOS 440 io.TOSW := TOSW 441 io.TOSR := TOSR 442 io.NOS := topNos 443 io.ssp := ssp 444 io.sctr := sctr 445 io.nsp := nsp 446 447 when (io.s3_cancel) { 448 // recovery of all related pointers 449 TOSR := io.s3_meta.TOSR 450 TOSW := io.s3_meta.TOSW 451 ssp := io.s3_meta.ssp 452 sctr := io.s3_meta.sctr 453 454 // for missing pop, we also need to do a pop here 455 when (io.s3_missed_pop) { 456 specPop(io.s3_meta.ssp, io.s3_meta.sctr, io.s3_meta.TOSR, io.s3_meta.TOSW, io.s3_meta.NOS) 457 } 458 when (io.s3_missed_push) { 459 // do not use any bypass from f2 460 specPush(io.s3_pushAddr, io.s3_meta.ssp, io.s3_meta.sctr, io.s3_meta.TOSR, io.s3_meta.TOSW, s3TopEntry) 461 } 462 } 463 464 val commitTop = commit_stack(nsp) 465 466 when (io.commit_pop_valid) { 467 468 val nsp_update = Wire(UInt(log2Up(rasSize).W)) 469 when (io.commit_meta_ssp =/= nsp) { 470 // force set nsp to commit ssp to avoid permanent errors 471 nsp_update := io.commit_meta_ssp 472 } .otherwise { 473 nsp_update := nsp 474 } 475 476 // if ctr > 0, --ctr in stack, otherwise --nsp 477 when (commitTop.ctr > 0.U) { 478 commit_stack(nsp_update).ctr := commitTop.ctr - 1.U 479 nsp := nsp_update 480 } .otherwise { 481 nsp := ptrDec(nsp_update); 482 } 483 // XSError(io.commit_meta_ssp =/= nsp, "nsp mismatch with expected ssp") 484 } 485 486 val commit_push_addr = spec_queue(io.commit_meta_TOSW.value).retAddr 487 488 489 490 when (io.commit_push_valid) { 491 val nsp_update = Wire(UInt(log2Up(rasSize).W)) 492 when (io.commit_meta_ssp =/= nsp) { 493 // force set nsp to commit ssp to avoid permanent errors 494 nsp_update := io.commit_meta_ssp 495 } .otherwise { 496 nsp_update := nsp 497 } 498 // if ctr < max && topAddr == push addr, ++ctr, otherwise ++nsp 499 when (commitTop.ctr < ctrMax && commitTop.retAddr === commit_push_addr) { 500 commit_stack(nsp_update).ctr := commitTop.ctr + 1.U 501 nsp := nsp_update 502 } .otherwise { 503 nsp := ptrInc(nsp_update) 504 commit_stack(ptrInc(nsp_update)).retAddr := commit_push_addr 505 commit_stack(ptrInc(nsp_update)).ctr := 0.U 506 } 507 // when overflow, BOS may be forced move forward, do not revert those changes 508 when (!spec_overflowed || isAfter(specPtrInc(io.commit_meta_TOSW), BOS)) { 509 BOS := specPtrInc(io.commit_meta_TOSW) 510 spec_overflowed := false.B 511 } 512 513 // XSError(io.commit_meta_ssp =/= nsp, "nsp mismatch with expected ssp") 514 // XSError(io.commit_push_addr =/= commit_push_addr, "addr from commit mismatch with addr from spec") 515 } 516 517 when (io.redirect_valid) { 518 TOSR := io.redirect_meta_TOSR 519 TOSW := io.redirect_meta_TOSW 520 ssp := io.redirect_meta_ssp 521 sctr := io.redirect_meta_sctr 522 523 when (io.redirect_isCall) { 524 specPush(io.redirect_callAddr, io.redirect_meta_ssp, io.redirect_meta_sctr, io.redirect_meta_TOSR, io.redirect_meta_TOSW, redirectTopEntry) 525 } 526 when (io.redirect_isRet) { 527 specPop(io.redirect_meta_ssp, io.redirect_meta_sctr, io.redirect_meta_TOSR, io.redirect_meta_TOSW, redirectTopNos) 528 } 529 } 530 531 io.debug.commit_stack.zipWithIndex.foreach{case (a, i) => a := commit_stack(i)} 532 io.debug.spec_nos.zipWithIndex.foreach{case (a, i) => a := spec_nos(i)} 533 io.debug.spec_queue.zipWithIndex.foreach{ case (a, i) => a := spec_queue(i)} 534 } 535 536 val stack = Module(new RASStack(RasSize, RasSpecSize)).io 537 538 val s2_spec_push = WireInit(false.B) 539 val s2_spec_pop = WireInit(false.B) 540 val s2_full_pred = io.in.bits.resp_in(0).s2.full_pred(2) 541 // when last inst is an rvi call, fall through address would be set to the middle of it, so an addition is needed 542 val s2_spec_new_addr = s2_full_pred.fallThroughAddr + Mux(s2_full_pred.last_may_be_rvi_call, 2.U, 0.U) 543 stack.spec_push_valid := s2_spec_push 544 stack.spec_pop_valid := s2_spec_pop 545 stack.spec_push_addr := s2_spec_new_addr 546 547 // confirm that the call/ret is the taken cfi 548 s2_spec_push := io.s2_fire(2) && s2_full_pred.hit_taken_on_call && !io.s3_redirect(2) 549 s2_spec_pop := io.s2_fire(2) && s2_full_pred.hit_taken_on_ret && !io.s3_redirect(2) 550 551 //val s2_jalr_target = io.out.s2.full_pred.jalr_target 552 //val s2_last_target_in = s2_full_pred.targets.last 553 // val s2_last_target_out = io.out.s2.full_pred(2).targets.last 554 val s2_is_jalr = s2_full_pred.is_jalr 555 val s2_is_ret = s2_full_pred.is_ret 556 val s2_top = stack.spec_pop_addr 557 // assert(is_jalr && is_ret || !is_ret) 558 when(s2_is_ret && io.ctrl.ras_enable) { 559 io.out.s2.full_pred.map(_.jalr_target).foreach(_ := s2_top) 560 // FIXME: should use s1 globally 561 } 562 //s2_last_target_out := Mux(s2_is_jalr, s2_jalr_target, s2_last_target_in) 563 io.out.s2.full_pred.zipWithIndex.foreach{ case (a, i) => 564 a.targets.last := Mux(s2_is_jalr, io.out.s2.full_pred(i).jalr_target, io.in.bits.resp_in(0).s2.full_pred(i).targets.last) 565 } 566 567 val s2_meta = Wire(new RASMeta) 568 s2_meta.ssp := stack.ssp 569 s2_meta.sctr := stack.sctr 570 s2_meta.TOSR := stack.TOSR 571 s2_meta.TOSW := stack.TOSW 572 s2_meta.NOS := stack.NOS 573 574 val s3_top = RegEnable(stack.spec_pop_addr, io.s2_fire(2)) 575 val s3_spec_new_addr = RegEnable(s2_spec_new_addr, io.s2_fire(2)) 576 577 // val s3_jalr_target = io.out.s3.full_pred.jalr_target 578 // val s3_last_target_in = io.in.bits.resp_in(0).s3.full_pred(2).targets.last 579 // val s3_last_target_out = io.out.s3.full_pred(2).targets.last 580 val s3_is_jalr = io.in.bits.resp_in(0).s3.full_pred(2).is_jalr 581 val s3_is_ret = io.in.bits.resp_in(0).s3.full_pred(2).is_ret 582 // assert(is_jalr && is_ret || !is_ret) 583 when(s3_is_ret && io.ctrl.ras_enable) { 584 io.out.s3.full_pred.map(_.jalr_target).foreach(_ := s3_top) 585 // FIXME: should use s1 globally 586 } 587 // s3_last_target_out := Mux(s3_is_jalr, s3_jalr_target, s3_last_target_in) 588 io.out.s3.full_pred.zipWithIndex.foreach{ case (a, i) => 589 a.targets.last := Mux(s3_is_jalr, io.out.s3.full_pred(i).jalr_target, io.in.bits.resp_in(0).s3.full_pred(i).targets.last) 590 } 591 592 val s3_pushed_in_s2 = RegEnable(s2_spec_push, io.s2_fire(2)) 593 val s3_popped_in_s2 = RegEnable(s2_spec_pop, io.s2_fire(2)) 594 val s3_push = io.in.bits.resp_in(0).s3.full_pred(2).hit_taken_on_call 595 val s3_pop = io.in.bits.resp_in(0).s3.full_pred(2).hit_taken_on_ret 596 597 val s3_cancel = io.s3_fire(2) && (s3_pushed_in_s2 =/= s3_push || s3_popped_in_s2 =/= s3_pop) 598 stack.s2_fire := io.s2_fire(2) 599 stack.s3_fire := io.s3_fire(2) 600 601 stack.s3_cancel := s3_cancel 602 603 val s3_meta = RegEnable(s2_meta, io.s2_fire(2)) 604 605 stack.s3_meta := s3_meta 606 stack.s3_missed_pop := s3_pop && !s3_popped_in_s2 607 stack.s3_missed_push := s3_push && !s3_pushed_in_s2 608 stack.s3_pushAddr := s3_spec_new_addr 609 610 // no longer need the top Entry, but TOSR, TOSW, ssp sctr 611 // TODO: remove related signals 612 io.out.last_stage_spec_info.sctr := s3_meta.sctr 613 io.out.last_stage_spec_info.ssp := s3_meta.ssp 614 io.out.last_stage_spec_info.TOSW := s3_meta.TOSW 615 io.out.last_stage_spec_info.TOSR := s3_meta.TOSR 616 io.out.last_stage_spec_info.NOS := s3_meta.NOS 617 io.out.last_stage_spec_info.topAddr := s3_top 618 io.out.last_stage_meta := s3_meta.asUInt 619 620 621 val redirect = RegNextWithEnable(io.redirect) 622 val do_recover = redirect.valid 623 val recover_cfi = redirect.bits.cfiUpdate 624 625 val retMissPred = do_recover && redirect.bits.level === 0.U && recover_cfi.pd.isRet 626 val callMissPred = do_recover && redirect.bits.level === 0.U && recover_cfi.pd.isCall 627 // when we mispredict a call, we must redo a push operation 628 // similarly, when we mispredict a return, we should redo a pop 629 stack.redirect_valid := do_recover 630 stack.redirect_isCall := callMissPred 631 stack.redirect_isRet := retMissPred 632 stack.redirect_meta_ssp := recover_cfi.ssp 633 stack.redirect_meta_sctr := recover_cfi.sctr 634 stack.redirect_meta_TOSW := recover_cfi.TOSW 635 stack.redirect_meta_TOSR := recover_cfi.TOSR 636 stack.redirect_meta_NOS := recover_cfi.NOS 637 stack.redirect_callAddr := recover_cfi.pc + Mux(recover_cfi.pd.isRVC, 2.U, 4.U) 638 639 val update = io.update.bits 640 val updateMeta = io.update.bits.meta.asTypeOf(new RASMeta) 641 val updateValid = io.update.valid 642 643 stack.commit_push_valid := updateValid && update.is_call_taken 644 stack.commit_pop_valid := updateValid && update.is_ret_taken 645 stack.commit_push_addr := update.ftb_entry.getFallThrough(update.pc) + Mux(update.ftb_entry.last_may_be_rvi_call, 2.U, 0.U) 646 stack.commit_meta_TOSW := updateMeta.TOSW 647 stack.commit_meta_TOSR := updateMeta.TOSR 648 stack.commit_meta_ssp := updateMeta.ssp 649 stack.commit_meta_sctr := updateMeta.sctr 650 651 652 XSPerfAccumulate("ras_s3_cancel", s3_cancel) 653 XSPerfAccumulate("ras_redirect_recover", redirect.valid) 654 XSPerfAccumulate("ras_s3_and_redirect_recover_at_the_same_time", s3_cancel && redirect.valid) 655 656 657 val spec_debug = stack.debug 658 XSDebug(io.s2_fire(2), "----------------RAS----------------\n") 659 XSDebug(io.s2_fire(2), " TopRegister: 0x%x\n",stack.spec_pop_addr) 660 XSDebug(io.s2_fire(2), " index addr ctr nos (spec part)\n") 661 for(i <- 0 until RasSpecSize){ 662 XSDebug(io.s2_fire(2), " (%d) 0x%x %d %d",i.U,spec_debug.spec_queue(i).retAddr,spec_debug.spec_queue(i).ctr, spec_debug.spec_nos(i).value) 663 when(i.U === stack.TOSW.value){XSDebug(io.s2_fire(2), " <----TOSW")} 664 when(i.U === stack.TOSR.value){XSDebug(io.s2_fire(2), " <----TOSR")} 665 when(i.U === stack.BOS.value){XSDebug(io.s2_fire(2), " <----BOS")} 666 XSDebug(io.s2_fire(2), "\n") 667 } 668 XSDebug(io.s2_fire(2), " index addr ctr (committed part)\n") 669 for(i <- 0 until RasSize){ 670 XSDebug(io.s2_fire(2), " (%d) 0x%x %d",i.U,spec_debug.commit_stack(i).retAddr,spec_debug.commit_stack(i).ctr) 671 when(i.U === stack.ssp){XSDebug(io.s2_fire(2), " <----ssp")} 672 when(i.U === stack.nsp){XSDebug(io.s2_fire(2), " <----nsp")} 673 XSDebug(io.s2_fire(2), "\n") 674 } 675 /* 676 XSDebug(s2_spec_push, "s2_spec_push inAddr: 0x%x inCtr: %d | allocNewEntry:%d | sp:%d \n", 677 s2_spec_new_addr,spec_debug.spec_push_entry.ctr,spec_debug.spec_alloc_new,spec_debug.sp.asUInt) 678 XSDebug(s2_spec_pop, "s2_spec_pop outAddr: 0x%x \n",io.out.s2.getTarget) 679 val s3_recover_entry = spec_debug.recover_push_entry 680 XSDebug(s3_recover && s3_push, "s3_recover_push inAddr: 0x%x inCtr: %d | allocNewEntry:%d | sp:%d \n", 681 s3_recover_entry.retAddr, s3_recover_entry.ctr, spec_debug.recover_alloc_new, s3_sp.asUInt) 682 XSDebug(s3_recover && s3_pop, "s3_recover_pop outAddr: 0x%x \n",io.out.s3.getTarget) 683 val redirectUpdate = redirect.bits.cfiUpdate 684 XSDebug(do_recover && callMissPred, "redirect_recover_push\n") 685 XSDebug(do_recover && retMissPred, "redirect_recover_pop\n") 686 XSDebug(do_recover, "redirect_recover(SP:%d retAddr:%x ctr:%d) \n", 687 redirectUpdate.rasSp,redirectUpdate.rasEntry.retAddr,redirectUpdate.rasEntry.ctr) 688 */ 689 690 generatePerfEvent() 691} 692