xref: /XiangShan/src/main/scala/xiangshan/frontend/newRAS.scala (revision c89b46421f4e4f58aeacd51297260c254a386e8b)
1/***************************************************************************************
2* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
3* Copyright (c) 2020-2021 Peng Cheng Laboratory
4*
5* XiangShan is licensed under Mulan PSL v2.
6* You can use this software according to the terms and conditions of the Mulan PSL v2.
7* You may obtain a copy of Mulan PSL v2 at:
8*          http://license.coscl.org.cn/MulanPSL2
9*
10* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
11* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
12* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
13*
14* See the Mulan PSL v2 for more details.
15***************************************************************************************/
16package xiangshan.frontend
17
18import chipsalliance.rocketchip.config.Parameters
19import chisel3._
20import chisel3.experimental.chiselName
21import chisel3.util._
22import utils._
23import utility._
24import xiangshan._
25import xiangshan.frontend._
26
// One entry of the return address stack.
class RASEntry()(implicit p: Parameters) extends XSBundle {
    val retAddr = UInt(VAddrBits.W) // predicted return address
    // Recursion-compression counter: pushing the same retAddr again increments
    // ctr instead of occupying a new slot (see the push logic in RASStack).
    val ctr = UInt(8.W) // layer of nested call functions
    // Inequality on both fields; used to compare entries field-by-field.
    def =/=(that: RASEntry) = this.retAddr =/= that.retAddr || this.ctr =/= that.ctr
}
32
// Pointer into the speculative RAS queue (circular, RasSpecSize deep).
// flag/value fields and the ordering helpers (isBefore/isAfter) come from
// CircularQueuePtr.
class RASPtr(implicit p: Parameters) extends CircularQueuePtr[RASPtr](
  p => p(XSCoreParamsKey).RasSpecSize
){
}
37
object RASPtr {
  /** Build a RASPtr wire from an explicit wrap flag and index value. */
  def apply(f: Bool, v: UInt)(implicit p: Parameters): RASPtr = {
    val result = Wire(new RASPtr)
    result.flag := f
    result.value := v
    result
  }

  /** Same queue position with the wrap flag toggled. */
  def inverse(ptr: RASPtr)(implicit p: Parameters): RASPtr = apply(!ptr.flag, ptr.value)
}
49
// Per-prediction RAS snapshot, carried through the pipeline so the stack can
// be restored on misprediction and consumed again at commit.
class RASMeta(implicit p: Parameters) extends XSBundle {
  val ssp = UInt(log2Up(RasSize).W) // speculative top index of the commit stack
  // NOTE(review): width here is log2Up(RasCtrSize) bits, while the sctr
  // register in RASStack is RasCtrSize bits wide and is compared against
  // ctrMax = (1 << RasCtrSize) - 1 — confirm the intended counter width.
  val sctr = UInt(log2Up(RasCtrSize).W)
  val TOSW = new RASPtr // spec-queue write pointer (next slot to allocate)
  val TOSR = new RASPtr // spec-queue read pointer (current speculative top)
  val NOS = new RASPtr  // next-on-stack link of the current top
}
57
object RASMeta {
  /** Assemble a RASMeta wire from its individual fields. */
  def apply(ssp: UInt, sctr: UInt, TOSW: RASPtr, TOSR: RASPtr, NOS: RASPtr)(implicit p: Parameters): RASMeta = {
    val meta = Wire(new RASMeta)
    meta.ssp := ssp
    meta.sctr := sctr
    meta.TOSW := TOSW
    meta.TOSR := TOSR
    meta.NOS := NOS
    meta
  }
}
69
// Debug-only window into the internal RAS state (all fields are outputs,
// mirrored from the registers inside RASStack).
class RASDebug(implicit p: Parameters) extends XSBundle {
  val spec_queue = Output(Vec(RasSpecSize, new RASEntry))
  val spec_nos = Output(Vec(RasSpecSize, new RASPtr))
  val commit_stack = Output(Vec(RasSize, new RASEntry))
}
75
@chiselName
class RAS(implicit p: Parameters) extends BasePredictor {
  // Width in bits of the metadata this predictor contributes per prediction.
  override val meta_size = WireInit(0.U.asTypeOf(new RASMeta)).getWidth
79
80  object RASEntry {
81    def apply(retAddr: UInt, ctr: UInt): RASEntry = {
82      val e = Wire(new RASEntry)
83      e.retAddr := retAddr
84      e.ctr := ctr
85      e
86    }
87  }
88
89
  @chiselName
  // Core RAS implementation: a committed stack (commit_stack) plus a
  // speculative circular queue (spec_queue) linked by next-on-stack pointers.
  class RASStack(rasSize: Int, rasSpecSize: Int) extends XSModule with HasCircularQueuePtrHelper {
    val io = IO(new Bundle {
      // s2-stage speculative push/pop requests
      val spec_push_valid = Input(Bool())
      val spec_pop_valid = Input(Bool())
      val spec_push_addr = Input(UInt(VAddrBits.W))
      // for write bypass between s2 and s3

      val s2_fire = Input(Bool())
      val s3_fire = Input(Bool())
      // s3 disagrees with s2's push/pop decision; recover from s3_meta
      val s3_cancel = Input(Bool())
      val s3_meta = Input(new RASMeta)
      val s3_missed_pop = Input(Bool())
      val s3_missed_push = Input(Bool())
      val s3_pushAddr = Input(UInt(VAddrBits.W))
      // predicted return target (current speculative top)
      val spec_pop_addr = Output(UInt(VAddrBits.W))

      // commit-time updates that move data into the committed stack
      val commit_push_valid = Input(Bool())
      val commit_pop_valid = Input(Bool())
      val commit_push_addr = Input(UInt(VAddrBits.W))
      val commit_meta_TOSW = Input(new RASPtr)
      val commit_meta_TOSR = Input(new RASPtr)
      // for debug purpose only
      val commit_meta_ssp = Input(UInt(log2Up(RasSize).W))
      val commit_meta_sctr = Input(UInt(log2Up(RasCtrSize).W))

      // redirect (misprediction recovery) interface: restore to the
      // checkpointed meta, then redo the mispredicted call/ret if needed
      val redirect_valid = Input(Bool())
      val redirect_isCall = Input(Bool())
      val redirect_isRet = Input(Bool())
      val redirect_meta_ssp = Input(UInt(log2Up(RasSize).W))
      val redirect_meta_sctr = Input(UInt(log2Up(RasCtrSize).W))
      val redirect_meta_TOSW = Input(new RASPtr)
      val redirect_meta_TOSR = Input(new RASPtr)
      val redirect_meta_NOS = Input(new RASPtr)
      val redirect_callAddr = Input(UInt(VAddrBits.W))

      // current speculative state, sampled by the outer module as meta
      val ssp = Output(UInt(log2Up(RasSize).W))
      val sctr = Output(UInt(log2Up(RasCtrSize).W))
      val nsp = Output(UInt(log2Up(RasSize).W))
      val TOSR = Output(new RASPtr)
      val TOSW = Output(new RASPtr)
      val NOS = Output(new RASPtr)
      val BOS = Output(new RASPtr)

      val debug = new RASDebug
    })
136
    // architectural (committed) stack and the speculative in-flight queue
    val commit_stack = RegInit(VecInit(Seq.fill(RasSize)(RASEntry(0.U, 0.U))))
    val spec_queue = RegInit(VecInit(Seq.fill(rasSpecSize)(RASEntry(0.U, 0.U))))
    // next-on-stack link for each spec_queue slot (what TOSR becomes on a pop)
    val spec_nos = RegInit(VecInit(Seq.fill(rasSpecSize)(RASPtr(false.B, 0.U))))

    val nsp = RegInit(0.U(log2Up(rasSize).W)) // committed stack top
    val ssp = RegInit(0.U(log2Up(rasSize).W)) // speculative stack top

    // speculative recursion counter of the current top entry
    val sctr = RegInit(0.U(RasCtrSize.W))
    // TOSR starts "one before" slot 0 (wrapped), i.e. pointing at nothing valid
    val TOSR = RegInit(RASPtr(true.B, (RasSpecSize - 1).U))
    val TOSW = RegInit(RASPtr(false.B, 0.U))
    // bottom-of-stack: oldest spec-queue entry not yet reclaimed by commit
    val BOS = RegInit(RASPtr(false.B, 0.U))

    // set when a push drained the spec queue and BOS was forced forward
    val spec_overflowed = RegInit(false.B)

    // one-deep write bypass so an s2 push is visible before the s3 real write
    val writeBypassEntry = Reg(new RASEntry)
    val writeBypassNos = Reg(new RASPtr)

    val writeBypassValid = RegInit(0.B)
    val writeBypassValidWire = Wire(Bool())

    // True iff currentTOSR points at a live spec-queue entry.
    def TOSRinRange(currentTOSR: RASPtr, currentTOSW: RASPtr) = {
      val inflightValid = WireInit(false.B)
      // if in range, TOSR should be no younger than BOS and strictly younger than TOSW
      when (!isBefore(currentTOSR, BOS) && isBefore(currentTOSR, currentTOSW)) {
        inflightValid := true.B
      }
      inflightValid
    }

    // Committed-stack entry at the given speculative stack pointer.
    def getCommitTop(currentSsp: UInt) = {
      commit_stack(currentSsp)
    }
169
170    def getTopNos(currentTOSR: RASPtr, allowBypass: Boolean):RASPtr = {
171      val ret = Wire(new RASPtr)
172      if (allowBypass){
173        when (writeBypassValid) {
174          ret := writeBypassNos
175        } .otherwise {
176          ret := spec_nos(TOSR.value)
177        }
178      } else {
179        ret := spec_nos(TOSR.value) // invalid when TOSR is not in range
180      }
181      ret
182    }
183
    // Current top entry seen from the given state: prefer the s2 write bypass
    // (if allowed), then a live spec-queue entry, else fall back to the
    // committed stack.
    def getTop(currentSsp: UInt, currentSctr: UInt, currentTOSR: RASPtr, currentTOSW: RASPtr, allowBypass: Boolean):RASEntry = {
      val ret = Wire(new RASEntry)
      if (allowBypass) {
        when (writeBypassValid) {
          ret := writeBypassEntry
        } .elsewhen (TOSRinRange(currentTOSR, currentTOSW)) {
          ret := spec_queue(currentTOSR.value)
        } .otherwise {
          ret := getCommitTop(currentSsp)
        }
      } else {
        when (TOSRinRange(currentTOSR, currentTOSW)) {
          ret := spec_queue(currentTOSR.value)
        } .otherwise {
          ret := getCommitTop(currentSsp)
        }
      }

      ret
    }

    // it would be unsafe for specPtr manipulation if specSize is not power of 2
    assert(log2Up(RasSpecSize) == log2Floor(RasSpecSize))
    // saturation value of the recursion counter
    def ctrMax = ((1l << RasCtrSize) - 1).U
    // plain stack-pointer arithmetic (wraps modulo the UInt width)
    def ptrInc(ptr: UInt) = ptr + 1.U
    def ptrDec(ptr: UInt) = ptr - 1.U

    // circular-queue pointer arithmetic (flag-aware, from CircularQueuePtr)
    def specPtrInc(ptr: RASPtr) = ptr + 1.U
    def specPtrDec(ptr: RASPtr) = ptr - 1.U
213
214
215
216
217
218
    // Bypass-valid state machine. Priority: call redirect sets it, any other
    // redirect clears it, s2 fire tracks whether s2 pushed, s3 fire clears it
    // (the real write lands in the spec queue then).
    when (io.redirect_valid && io.redirect_isCall) {
      writeBypassValidWire := true.B
      writeBypassValid := true.B
    } .elsewhen (io.redirect_valid) {
      // clear current top writeBypass if doing redirect
      writeBypassValidWire := false.B
      writeBypassValid := false.B
    } .elsewhen (io.s2_fire) {
      writeBypassValidWire := io.spec_push_valid
      writeBypassValid := io.spec_push_valid
    } .elsewhen (io.s3_fire) {
      writeBypassValidWire := false.B
      writeBypassValid := false.B
    } .otherwise {
      writeBypassValidWire := writeBypassValid
    }



    // top views for the three possible sources of a push/pop this cycle
    val topEntry = getTop(ssp, sctr, TOSR, TOSW, true)
    val topNos = getTopNos(TOSR, true)
    val redirectTopEntry = getTop(io.redirect_meta_ssp, io.redirect_meta_sctr, io.redirect_meta_TOSR, io.redirect_meta_TOSW, false)
    val redirectTopNos = io.redirect_meta_NOS
    val s3TopEntry = getTop(io.s3_meta.ssp, io.s3_meta.sctr, io.s3_meta.TOSR, io.s3_meta.TOSW, false)
    val s3TopNos = io.s3_meta.NOS

    // entry to be pushed this cycle (call redirect takes precedence over s2 push);
    // if the pushed address equals the current top and ctr is not saturated,
    // recursion is compressed by bumping the counter instead
    val writeEntry = Wire(new RASEntry)
    val writeNos = Wire(new RASPtr)
    writeEntry.retAddr := Mux(io.redirect_valid && io.redirect_isCall,  io.redirect_callAddr, io.spec_push_addr)
    writeEntry.ctr := Mux(io.redirect_valid && io.redirect_isCall,
      Mux(redirectTopEntry.retAddr === io.redirect_callAddr && redirectTopEntry.ctr < ctrMax, io.redirect_meta_sctr + 1.U, 0.U),
      Mux(topEntry.retAddr === io.spec_push_addr && topEntry.ctr < ctrMax, sctr + 1.U, 0.U))

    writeNos := Mux(io.redirect_valid && io.redirect_isCall,
      io.redirect_meta_NOS, TOSR)

    // latch into the bypass registers on any push source
    when (io.spec_push_valid || (io.redirect_valid && io.redirect_isCall)) {
      writeBypassEntry := writeEntry
      writeBypassNos := writeNos
    }
259
    val realPush = Wire(Bool())
    val realWriteEntry = Wire(new RASEntry)
    // timingTop is the registered "top of stack for next cycle"; the big
    // priority chain below computes it from whichever event fires this cycle.
    val timingTop = RegInit(0.U.asTypeOf(new RASEntry))
    val timingNos = RegInit(0.U.asTypeOf(new RASPtr))

    when (writeBypassValidWire) {
      // a push is (or was just) in flight: next top is the pushed entry
      when ((io.redirect_valid && io.redirect_isCall) || io.spec_push_valid) {
        timingTop := writeEntry
        timingNos := writeNos
      } .otherwise {
        timingTop := writeBypassEntry
        timingNos := writeBypassNos
      }

    } .elsewhen (io.redirect_valid && io.redirect_isRet) {
      // getTop using redirect Nos as TOSR
      val popRedSsp = Wire(UInt(log2Up(rasSize).W))
      val popRedSctr = Wire(UInt(log2Up(RasCtrSize).W))
      val popRedTOSR = io.redirect_meta_NOS
      val popRedTOSW = io.redirect_meta_TOSW

      // replay the pop on the checkpointed state: decrement ctr first,
      // otherwise step the stack pointer down and fetch the new top's ctr
      when (io.redirect_meta_sctr > 0.U) {
        popRedSctr := io.redirect_meta_sctr - 1.U
        popRedSsp := io.redirect_meta_ssp
      } .elsewhen (TOSRinRange(popRedTOSR, TOSW)) {
        popRedSsp := ptrDec(io.redirect_meta_ssp)
        popRedSctr := spec_queue(popRedTOSR.value).ctr
      } .otherwise {
        popRedSsp := ptrDec(io.redirect_meta_ssp)
        popRedSctr := getCommitTop(ptrDec(io.redirect_meta_ssp)).ctr
      }
      // We are deciding top for the next cycle, no need to use bypass here
      timingTop := getTop(popRedSsp, popRedSctr, popRedTOSR, popRedTOSW, false)
    } .elsewhen (io.redirect_valid) {
      // Neither call nor ret
      val popSsp = io.redirect_meta_ssp
      val popSctr = io.redirect_meta_sctr
      val popTOSR = io.redirect_meta_TOSR
      val popTOSW = io.redirect_meta_TOSW

      timingTop := getTop(popSsp, popSctr, popTOSR, popTOSW, false)

    } .elsewhen (io.spec_pop_valid) {
      // getTop using current Nos as TOSR
      val popSsp = Wire(UInt(log2Up(rasSize).W))
      val popSctr = Wire(UInt(log2Up(RasCtrSize).W))
      val popTOSR = topNos
      val popTOSW = TOSW

      // same pop-replay pattern as above, on the live speculative state
      when (sctr > 0.U) {
        popSctr := sctr - 1.U
        popSsp := ssp
      } .elsewhen (TOSRinRange(popTOSR, TOSW)) {
        popSsp := ptrDec(ssp)
        popSctr := spec_queue(popTOSR.value).ctr
      } .otherwise {
        popSsp := ptrDec(ssp)
        popSctr := getCommitTop(ptrDec(ssp)).ctr
      }
      // We are deciding top for the next cycle, no need to use bypass here
      timingTop := getTop(popSsp, popSctr, popTOSR, popTOSW, false)
    } .elsewhen (realPush) {
      // just updating spec queue, cannot read from there
      timingTop := realWriteEntry
    } .elsewhen (io.s3_cancel) {
      // s3 is different with s2
      timingTop := getTop(io.s3_meta.ssp, io.s3_meta.sctr, io.s3_meta.TOSR, io.s3_meta.TOSW, false)
      when (io.s3_missed_push) {
        val writeEntry_s3 = Wire(new RASEntry)
        timingTop := writeEntry_s3
        writeEntry_s3.retAddr := io.s3_pushAddr
        // NOTE(review): timingTop here reads the register's *current* value
        // (this cycle's displayed top), not the getTop result assigned above —
        // confirm this is the intended comparison base (s3_missPushEntry uses
        // s3TopEntry.retAddr for the equivalent check).
        writeEntry_s3.ctr := Mux(timingTop.retAddr === io.s3_pushAddr && io.s3_meta.sctr < ctrMax, io.s3_meta.sctr + 1.U, 0.U)
      } .elsewhen (io.s3_missed_pop) {
        val popRedSsp_s3 = Wire(UInt(log2Up(rasSize).W))
        val popRedSctr_s3 = Wire(UInt(log2Up(RasCtrSize).W))
        val popRedTOSR_s3 = io.s3_meta.NOS
        val popRedTOSW_s3 = io.s3_meta.TOSW

        // pop-replay on the s3 checkpoint
        when (io.s3_meta.sctr > 0.U) {
          popRedSctr_s3 := io.s3_meta.sctr - 1.U
          popRedSsp_s3 := io.s3_meta.ssp
        } .elsewhen (TOSRinRange(popRedTOSR_s3, popRedTOSW_s3)) {
          popRedSsp_s3 := ptrDec(io.s3_meta.ssp)
          popRedSctr_s3 := spec_queue(popRedTOSR_s3.value).ctr
        } .otherwise {
          popRedSsp_s3 := ptrDec(io.s3_meta.ssp)
          popRedSctr_s3 := getCommitTop(ptrDec(io.s3_meta.ssp)).ctr
        }
        // We are deciding top for the next cycle, no need to use bypass here
        timingTop := getTop(popRedSsp_s3, popRedSctr_s3, popRedTOSR_s3, popRedTOSW_s3, false)
      }
    } .otherwise {
      // easy case
      val popSsp = ssp
      val popSctr = sctr
      val popTOSR = TOSR
      val popTOSW = TOSW
      timingTop := getTop(popSsp, popSctr, popTOSR, popTOSW, false)
    }
    // combinational view of the top, for cross-checking against timingTop
    val diffTop = Mux(writeBypassValid, writeBypassEntry.retAddr, topEntry.retAddr)

    XSPerfAccumulate("ras_top_mismatch", diffTop =/= timingTop.retAddr);
    // could diff when more pop than push and a commit stack is updated with inflight info

    // NOTE(review): enable is `io.s2_fire || io.redirect_isCall`, while the
    // address/NOS registers below use `io.s2_fire || (io.redirect_valid &&
    // io.redirect_isCall)`. The caller drives redirect_isCall gated by
    // do_recover, so these look equivalent here, but the asymmetry is worth
    // confirming against the other two enables.
    val realWriteEntry_next = RegEnable(writeEntry, io.s2_fire || io.redirect_isCall)
    // entry/address/NOS for an s3 missed push (s2 failed to push)
    val s3_missPushEntry = Wire(new RASEntry)
    val s3_missPushAddr = Wire(new RASPtr)
    val s3_missPushNos = Wire(new RASPtr)

    s3_missPushEntry.retAddr := io.s3_pushAddr
    s3_missPushEntry.ctr := Mux(s3TopEntry.retAddr === io.s3_pushAddr && s3TopEntry.ctr < ctrMax, io.s3_meta.sctr + 1.U, 0.U)
    s3_missPushAddr := io.s3_meta.TOSW
    s3_missPushNos := io.s3_meta.TOSR



    // select the actual write data: redirect-call replay wins, then an s3
    // missed push, otherwise the s2 push latched last cycle
    realWriteEntry := Mux(io.redirect_isCall, realWriteEntry_next,
      Mux(io.s3_missed_push, s3_missPushEntry,
      realWriteEntry_next))

    val realWriteAddr_next = RegEnable(Mux(io.redirect_valid && io.redirect_isCall, io.redirect_meta_TOSW, TOSW), io.s2_fire || (io.redirect_valid && io.redirect_isCall))
    val realWriteAddr = Mux(io.redirect_isCall, realWriteAddr_next,
      Mux(io.s3_missed_push, s3_missPushAddr,
      realWriteAddr_next))
    val realNos_next = RegEnable(Mux(io.redirect_valid && io.redirect_isCall, io.redirect_meta_TOSR, TOSR), io.s2_fire || (io.redirect_valid && io.redirect_isCall))
    val realNos = Mux(io.redirect_isCall, realNos_next,
      Mux(io.s3_missed_push, s3_missPushNos,
      realNos_next))

    // commit the write into the spec queue one cycle after the push decision:
    // an s2 push confirmed by s3 (and not cancelled), an s3 missed push, or a
    // redirected call from last cycle
    realPush := (io.s3_fire && (!io.s3_cancel && RegEnable(io.spec_push_valid, io.s2_fire) || io.s3_missed_push)) || RegNext(io.redirect_valid && io.redirect_isCall)

    when (realPush) {
      spec_queue(realWriteAddr.value) := realWriteEntry
      spec_nos(realWriteAddr.value) := realNos
    }
395
    // Speculative push: advance TOSR/TOSW; either bump the recursion counter
    // (same retAddr, counter not saturated) or advance ssp with a fresh ctr.
    def specPush(retAddr: UInt, currentSsp: UInt, currentSctr: UInt, currentTOSR: RASPtr, currentTOSW: RASPtr, topEntry: RASEntry) = {
      TOSR := currentTOSW
      TOSW := specPtrInc(currentTOSW)
      // spec sp and ctr should always be maintained
      when (topEntry.retAddr === retAddr && currentSctr < ctrMax) {
        sctr := currentSctr + 1.U
      } .otherwise {
        ssp := ptrInc(currentSsp)
        sctr := 0.U
      }
      // if we are draining the capacity of spec queue, force move BOS forward
      when (specPtrInc(currentTOSW) === BOS) {
        BOS := specPtrInc(BOS)
        spec_overflowed := true.B;
      }
    }

    when (io.spec_push_valid) {
      specPush(io.spec_push_addr, ssp, sctr, TOSR, TOSW, topEntry)
    }

    // Speculative pop: follow the NOS link for TOSR; either decrement the
    // recursion counter or step ssp down and refetch the new top's ctr
    // (from the spec queue if live, else from the committed stack).
    def specPop(currentSsp: UInt, currentSctr: UInt, currentTOSR: RASPtr, currentTOSW: RASPtr, currentTopNos: RASPtr) = {
      // TOSR is only maintained when spec queue is not empty
      when (TOSRinRange(currentTOSR, currentTOSW)) {
        TOSR := currentTopNos
      }
      // spec sp and ctr should always be maintained
      when (currentSctr > 0.U) {
        sctr := currentSctr - 1.U
      } .elsewhen (TOSRinRange(currentTopNos, currentTOSW)) {
        // in range, use inflight data
        ssp := ptrDec(currentSsp)
        sctr := spec_queue(currentTopNos.value).ctr
      } .otherwise {
        // NOS not in range, use commit data
        ssp := ptrDec(currentSsp)
        sctr := getCommitTop(ptrDec(currentSsp)).ctr
        // in overflow state, we cannot determine the next sctr, sctr here is not accurate
      }
    }
    when (io.spec_pop_valid) {
      specPop(ssp, sctr, TOSR, TOSW, topNos)
    }
438
    // io.spec_pop_addr := Mux(writeBypassValid, writeBypassEntry.retAddr, topEntry.retAddr)

    // expose the registered top (one-cycle-early computation, see timingTop)
    io.spec_pop_addr := timingTop.retAddr
    io.BOS := BOS
    io.TOSW := TOSW
    io.TOSR := TOSR
    io.NOS := topNos
    io.ssp := ssp
    io.sctr := sctr
    io.nsp := nsp

    // s3 disagreed with s2: restore the s3 checkpoint, then redo the
    // operation s2 missed. These assignments override the s2-driven updates
    // above (Chisel last-connect semantics).
    when (io.s3_cancel) {
      // recovery of all related pointers
      TOSR := io.s3_meta.TOSR
      TOSW := io.s3_meta.TOSW
      ssp := io.s3_meta.ssp
      sctr := io.s3_meta.sctr

      // for missing pop, we also need to do a pop here
      when (io.s3_missed_pop) {
        specPop(io.s3_meta.ssp, io.s3_meta.sctr, io.s3_meta.TOSR, io.s3_meta.TOSW, io.s3_meta.NOS)
      }
      when (io.s3_missed_push) {
        // do not use any bypass from f2
        specPush(io.s3_pushAddr, io.s3_meta.ssp, io.s3_meta.sctr, io.s3_meta.TOSR, io.s3_meta.TOSW, s3TopEntry)
      }
    }
466
    val commitTop = commit_stack(nsp)

    // Commit-time pop: mirror a committed ret onto the committed stack.
    when (io.commit_pop_valid) {

      val nsp_update = Wire(UInt(log2Up(rasSize).W))
      when (io.commit_meta_ssp =/= nsp) {
        // force set nsp to commit ssp to avoid permanent errors
        nsp_update := io.commit_meta_ssp
      } .otherwise {
        nsp_update := nsp
      }

      // if ctr > 0, --ctr in stack, otherwise --nsp
      when (commitTop.ctr > 0.U) {
        commit_stack(nsp_update).ctr := commitTop.ctr - 1.U
        nsp := nsp_update
      } .otherwise {
        nsp := ptrDec(nsp_update);
      }
      // XSError(io.commit_meta_ssp =/= nsp, "nsp mismatch with expected ssp")
    }

    // the committed push data comes from the spec-queue slot the prediction
    // allocated (addressed by the committed TOSW from meta)
    val commit_push_addr = spec_queue(io.commit_meta_TOSW.value).retAddr



    // Commit-time push: mirror a committed call onto the committed stack,
    // compressing recursion the same way as the speculative push.
    when (io.commit_push_valid) {
      val nsp_update = Wire(UInt(log2Up(rasSize).W))
      when (io.commit_meta_ssp =/= nsp) {
        // force set nsp to commit ssp to avoid permanent errors
        nsp_update := io.commit_meta_ssp
      } .otherwise {
        nsp_update := nsp
      }
      // if ctr < max && topAddr == push addr, ++ctr, otherwise ++nsp
      when (commitTop.ctr < ctrMax && commitTop.retAddr === commit_push_addr) {
        commit_stack(nsp_update).ctr := commitTop.ctr + 1.U
        nsp := nsp_update
      } .otherwise {
        nsp := ptrInc(nsp_update)
        commit_stack(ptrInc(nsp_update)).retAddr := commit_push_addr
        commit_stack(ptrInc(nsp_update)).ctr := 0.U
      }
      // when overflow, BOS may be forced move forward, do not revert those changes
      when (!spec_overflowed || isAfter(specPtrInc(io.commit_meta_TOSW), BOS)) {
        BOS := specPtrInc(io.commit_meta_TOSW)
        spec_overflowed := false.B
      }

      // XSError(io.commit_meta_ssp =/= nsp, "nsp mismatch with expected ssp")
      // XSError(io.commit_push_addr =/= commit_push_addr, "addr from commit mismatch with addr from spec")
    }
519
    // Misprediction recovery: restore the checkpointed speculative state,
    // then redo the mispredicted call/ret on top of it. Placed last so these
    // connections override all earlier updates this cycle (last-connect).
    when (io.redirect_valid) {
      TOSR := io.redirect_meta_TOSR
      TOSW := io.redirect_meta_TOSW
      ssp := io.redirect_meta_ssp
      sctr := io.redirect_meta_sctr

      when (io.redirect_isCall) {
        specPush(io.redirect_callAddr, io.redirect_meta_ssp, io.redirect_meta_sctr, io.redirect_meta_TOSR, io.redirect_meta_TOSW, redirectTopEntry)
      }
      when (io.redirect_isRet) {
        specPop(io.redirect_meta_ssp, io.redirect_meta_sctr, io.redirect_meta_TOSR, io.redirect_meta_TOSW, redirectTopNos)
      }
    }

    // mirror internal state into the debug bundle
    io.debug.commit_stack.zipWithIndex.foreach{case (a, i) => a := commit_stack(i)}
    io.debug.spec_nos.zipWithIndex.foreach{case (a, i) => a := spec_nos(i)}
    io.debug.spec_queue.zipWithIndex.foreach{ case (a, i) => a := spec_queue(i)}
537  }
538
  val stack = Module(new RASStack(RasSize, RasSpecSize)).io

  // s2-stage speculative push/pop derived from the duplicated predictor
  // response (index 2 of the duplicated bundles is used throughout)
  val s2_spec_push = WireInit(false.B)
  val s2_spec_pop = WireInit(false.B)
  val s2_full_pred = io.in.bits.resp_in(0).s2.full_pred(2)
  // when last inst is an rvi call, fall through address would be set to the middle of it, so an addition is needed
  val s2_spec_new_addr = s2_full_pred.fallThroughAddr + Mux(s2_full_pred.last_may_be_rvi_call, 2.U, 0.U)
  stack.spec_push_valid := s2_spec_push
  stack.spec_pop_valid  := s2_spec_pop
  stack.spec_push_addr := s2_spec_new_addr

  // confirm that the call/ret is the taken cfi
  s2_spec_push := io.s2_fire(2) && s2_full_pred.hit_taken_on_call && !io.s3_redirect(2)
  s2_spec_pop  := io.s2_fire(2) && s2_full_pred.hit_taken_on_ret  && !io.s3_redirect(2)

  //val s2_jalr_target = io.out.s2.full_pred.jalr_target
  //val s2_last_target_in = s2_full_pred.targets.last
  // val s2_last_target_out = io.out.s2.full_pred(2).targets.last
  val s2_is_jalr = s2_full_pred.is_jalr
  val s2_is_ret = s2_full_pred.is_ret
  val s2_top = stack.spec_pop_addr
  // assert(is_jalr && is_ret || !is_ret)
  // on a ret, override every duplicated jalr_target with the RAS top
  when(s2_is_ret && io.ctrl.ras_enable) {
    io.out.s2.full_pred.map(_.jalr_target).foreach(_ := s2_top)
    // FIXME: should use s1 globally
  }
  //s2_last_target_out := Mux(s2_is_jalr, s2_jalr_target, s2_last_target_in)
  io.out.s2.full_pred.zipWithIndex.foreach{ case (a, i) =>
    a.targets.last := Mux(s2_is_jalr, io.out.s2.full_pred(i).jalr_target, io.in.bits.resp_in(0).s2.full_pred(i).targets.last)
  }

  // snapshot the stack state as this prediction's meta
  val s2_meta = Wire(new RASMeta)
  s2_meta.ssp := stack.ssp
  s2_meta.sctr := stack.sctr
  s2_meta.TOSR := stack.TOSR
  s2_meta.TOSW := stack.TOSW
  s2_meta.NOS := stack.NOS
576
  // s3 view of the s2 decisions, latched on s2 fire
  val s3_top = RegEnable(stack.spec_pop_addr, io.s2_fire(2))
  val s3_spec_new_addr = RegEnable(s2_spec_new_addr, io.s2_fire(2))

  // val s3_jalr_target = io.out.s3.full_pred.jalr_target
  // val s3_last_target_in = io.in.bits.resp_in(0).s3.full_pred(2).targets.last
  // val s3_last_target_out = io.out.s3.full_pred(2).targets.last
  val s3_is_jalr = io.in.bits.resp_in(0).s3.full_pred(2).is_jalr
  val s3_is_ret = io.in.bits.resp_in(0).s3.full_pred(2).is_ret
  // assert(is_jalr && is_ret || !is_ret)
  when(s3_is_ret && io.ctrl.ras_enable) {
    io.out.s3.full_pred.map(_.jalr_target).foreach(_ := s3_top)
    // FIXME: should use s1 globally
  }
  // s3_last_target_out := Mux(s3_is_jalr, s3_jalr_target, s3_last_target_in)
  io.out.s3.full_pred.zipWithIndex.foreach{ case (a, i) =>
    a.targets.last := Mux(s3_is_jalr, io.out.s3.full_pred(i).jalr_target, io.in.bits.resp_in(0).s3.full_pred(i).targets.last)
  }

  // detect s2/s3 disagreement on push/pop and drive the stack's s3 recovery
  val s3_pushed_in_s2 = RegEnable(s2_spec_push, io.s2_fire(2))
  val s3_popped_in_s2 = RegEnable(s2_spec_pop,  io.s2_fire(2))
  val s3_push = io.in.bits.resp_in(0).s3.full_pred(2).hit_taken_on_call
  val s3_pop  = io.in.bits.resp_in(0).s3.full_pred(2).hit_taken_on_ret

  val s3_cancel = io.s3_fire(2) && (s3_pushed_in_s2 =/= s3_push || s3_popped_in_s2 =/= s3_pop)
  stack.s2_fire := io.s2_fire(2)
  stack.s3_fire := io.s3_fire(2)

  stack.s3_cancel := s3_cancel

  val s3_meta = RegEnable(s2_meta, io.s2_fire(2))

  stack.s3_meta := s3_meta
  stack.s3_missed_pop := s3_pop && !s3_popped_in_s2
  stack.s3_missed_push := s3_push && !s3_pushed_in_s2
  stack.s3_pushAddr := s3_spec_new_addr

  // no longer need the top Entry, but TOSR, TOSW, ssp sctr
  // TODO: remove related signals
  io.out.last_stage_spec_info.sctr  := s3_meta.sctr
  io.out.last_stage_spec_info.ssp := s3_meta.ssp
  io.out.last_stage_spec_info.TOSW := s3_meta.TOSW
  io.out.last_stage_spec_info.TOSR := s3_meta.TOSR
  io.out.last_stage_spec_info.NOS := s3_meta.NOS
  io.out.last_stage_spec_info.topAddr := s3_top
  io.out.last_stage_meta := s3_meta.asUInt
622
623
  // redirect (misprediction) recovery, delayed one cycle
  val redirect = RegNext(io.redirect)
  val do_recover = redirect.valid
  val recover_cfi = redirect.bits.cfiUpdate

  val retMissPred  = do_recover && redirect.bits.level === 0.U && recover_cfi.pd.isRet
  val callMissPred = do_recover && redirect.bits.level === 0.U && recover_cfi.pd.isCall
  // when we mispredict a call, we must redo a push operation
  // similarly, when we mispredict a return, we should redo a pop
  stack.redirect_valid := do_recover
  stack.redirect_isCall := callMissPred
  stack.redirect_isRet := retMissPred
  stack.redirect_meta_ssp := recover_cfi.ssp
  stack.redirect_meta_sctr := recover_cfi.sctr
  stack.redirect_meta_TOSW := recover_cfi.TOSW
  stack.redirect_meta_TOSR := recover_cfi.TOSR
  stack.redirect_meta_NOS := recover_cfi.NOS
  // return address of the redirected call: pc + instruction length
  stack.redirect_callAddr := recover_cfi.pc + Mux(recover_cfi.pd.isRVC, 2.U, 4.U)

  // commit-time update from the FTQ
  val update = io.update.bits
  val updateMeta = io.update.bits.meta.asTypeOf(new RASMeta)
  val updateValid = io.update.valid

  stack.commit_push_valid := updateValid && update.is_call_taken
  stack.commit_pop_valid := updateValid && update.is_ret_taken
  stack.commit_push_addr := update.ftb_entry.getFallThrough(update.pc) + Mux(update.ftb_entry.last_may_be_rvi_call, 2.U, 0.U)
  stack.commit_meta_TOSW := updateMeta.TOSW
  stack.commit_meta_TOSR := updateMeta.TOSR
  stack.commit_meta_ssp := updateMeta.ssp
  stack.commit_meta_sctr := updateMeta.sctr
653
654
  // performance counters
  XSPerfAccumulate("ras_s3_cancel", s3_cancel)
  XSPerfAccumulate("ras_redirect_recover", redirect.valid)
  XSPerfAccumulate("ras_s3_and_redirect_recover_at_the_same_time", s3_cancel && redirect.valid)


  // textual dump of both stacks with pointer markers, printed on s2 fire
  val spec_debug = stack.debug
  XSDebug(io.s2_fire(2), "----------------RAS----------------\n")
  XSDebug(io.s2_fire(2), " TopRegister: 0x%x\n",stack.spec_pop_addr)
  XSDebug(io.s2_fire(2), "  index       addr           ctr           nos (spec part)\n")
  for(i <- 0 until RasSpecSize){
      XSDebug(io.s2_fire(2), "  (%d)   0x%x      %d       %d",i.U,spec_debug.spec_queue(i).retAddr,spec_debug.spec_queue(i).ctr, spec_debug.spec_nos(i).value)
      when(i.U === stack.TOSW.value){XSDebug(io.s2_fire(2), "   <----TOSW")}
      when(i.U === stack.TOSR.value){XSDebug(io.s2_fire(2), "   <----TOSR")}
      when(i.U === stack.BOS.value){XSDebug(io.s2_fire(2), "   <----BOS")}
      XSDebug(io.s2_fire(2), "\n")
  }
  XSDebug(io.s2_fire(2), "  index       addr           ctr   (committed part)\n")
  for(i <- 0 until RasSize){
      XSDebug(io.s2_fire(2), "  (%d)   0x%x      %d",i.U,spec_debug.commit_stack(i).retAddr,spec_debug.commit_stack(i).ctr)
      when(i.U === stack.ssp){XSDebug(io.s2_fire(2), "   <----ssp")}
      when(i.U === stack.nsp){XSDebug(io.s2_fire(2), "   <----nsp")}
      XSDebug(io.s2_fire(2), "\n")
  }
  // dead debug code from an older RAS design; references signals that no
  // longer exist (spec_push_entry, recover_push_entry, rasSp, ...)
  /*
  XSDebug(s2_spec_push, "s2_spec_push  inAddr: 0x%x  inCtr: %d |  allocNewEntry:%d |   sp:%d \n",
  s2_spec_new_addr,spec_debug.spec_push_entry.ctr,spec_debug.spec_alloc_new,spec_debug.sp.asUInt)
  XSDebug(s2_spec_pop, "s2_spec_pop  outAddr: 0x%x \n",io.out.s2.getTarget)
  val s3_recover_entry = spec_debug.recover_push_entry
  XSDebug(s3_recover && s3_push, "s3_recover_push  inAddr: 0x%x  inCtr: %d |  allocNewEntry:%d |   sp:%d \n",
    s3_recover_entry.retAddr, s3_recover_entry.ctr, spec_debug.recover_alloc_new, s3_sp.asUInt)
  XSDebug(s3_recover && s3_pop, "s3_recover_pop  outAddr: 0x%x \n",io.out.s3.getTarget)
  val redirectUpdate = redirect.bits.cfiUpdate
  XSDebug(do_recover && callMissPred, "redirect_recover_push\n")
  XSDebug(do_recover && retMissPred, "redirect_recover_pop\n")
  XSDebug(do_recover, "redirect_recover(SP:%d retAddr:%x ctr:%d) \n",
      redirectUpdate.rasSp,redirectUpdate.rasEntry.retAddr,redirectUpdate.rasEntry.ctr)
  */

  generatePerfEvent()
694}
695