/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
*
*
* Acknowledgement
*
* This implementation is inspired by several key papers:
* [1] Alex Ramirez, Oliverio J. Santana, Josep L. Larriba-Pey, and Mateo Valero. "[Fetching instruction streams.]
* (https://doi.org/10.1109/MICRO.2002.1176264)" 35th Annual IEEE/ACM International Symposium on Microarchitecture
* (MICRO). 2002.
* [2] Yasuo Ishii, Jaekyu Lee, Krishnendra Nathella, and Dam Sunwoo. "[Rebasing instruction prefetching: An industry
* perspective.](https://doi.org/10.1109/LCA.2020.3035068)" IEEE Computer Architecture Letters 19.2: 147-150. 2020.
* [3] Yasuo Ishii, Jaekyu Lee, Krishnendra Nathella, and Dam Sunwoo. "[Re-establishing fetch-directed instruction
* prefetching: An industry perspective.](https://doi.org/10.1109/ISPASS51385.2021.00034)" 2021 IEEE International
* Symposium on Performance Analysis of Systems and Software (ISPASS). 2021.
***************************************************************************************/

package xiangshan.frontend
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy.LazyModule
import freechips.rocketchip.diplomacy.LazyModuleImp
import org.chipsalliance.cde.config.Parameters
import utility._
import utility.mbist.MbistInterface
import utility.mbist.MbistPipeline
import utility.sram.SramBroadcastBundle
import utility.sram.SramCtlBundle
import utility.sram.SramHelper
import utility.sram.SramMbistBundle
import xiangshan._
import xiangshan.backend.fu.NewCSR.PFEvent
import xiangshan.backend.fu.PMP
import xiangshan.backend.fu.PMPChecker
import xiangshan.backend.fu.PMPReqBundle
import xiangshan.cache.mmu._
import xiangshan.frontend.icache._

class Frontend()(implicit p: Parameters) extends LazyModule with HasXSParameter {
  override def shouldBeInlined: Boolean = false
  val inner       = LazyModule(new FrontendInlined)
  lazy val module = new FrontendImp(this)
}

class FrontendImp(wrapper: Frontend)(implicit p: Parameters) extends LazyModuleImp(wrapper) {
  val io      = IO(wrapper.inner.module.io.cloneType)
  val io_perf = IO(wrapper.inner.module.io_perf.cloneType)
  io <> wrapper.inner.module.io
  io_perf <> wrapper.inner.module.io_perf
  if (p(DebugOptionsKey).ResetGen) {
    ResetGen(ResetGenNode(Seq(ModuleNode(wrapper.inner.module))), reset, sim = false, io.sramTest.mbistReset)
  }
}

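// FrontendInlined contains the actual frontend datapath (BPU, FTQ, IFU, ICache, IBuffer, ITLB and the
// instruction uncache); it is inlined into the enclosing Frontend wrapper, which only re-exposes the IO
// and, when DebugOptionsKey.ResetGen is set, inserts a generated reset tree.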
class FrontendInlined()(implicit p: Parameters) extends LazyModule with HasXSParameter {
  override def shouldBeInlined: Boolean = true

  val instrUncache = LazyModule(new InstrUncache())
  val icache       = LazyModule(new ICache())

  lazy val module = new FrontendInlinedImp(this)
}

class FrontendInlinedImp(outer: FrontendInlined) extends LazyModuleImp(outer)
    with HasXSParameter
    with HasPerfEvents {
  val io = IO(new Bundle() {
    val hartId       = Input(UInt(hartIdLen.W))
    val reset_vector = Input(UInt(PAddrBits.W))
    val fencei       = Input(Bool())
    val ptw          = new TlbPtwIO()
    val backend      = new FrontendToCtrlIO
    val softPrefetch = Vec(backendParams.LduCnt, Flipped(Valid(new SoftIfetchPrefetchBundle)))
    val sfence       = Input(new SfenceBundle)
    val tlbCsr       = Input(new TlbCsrBundle)
    val csrCtrl      = Input(new CustomCSRCtrlIO)
    val error        = ValidIO(new L1CacheErrorInfo)
    val frontendInfo = new Bundle {
      val ibufFull = Output(Bool())
      val bpuInfo = new Bundle {
        val bpRight = Output(UInt(XLEN.W))
        val bpWrong = Output(UInt(XLEN.W))
      }
    }
    val resetInFrontend = Output(Bool())
    val debugTopDown = new Bundle {
      val robHeadVaddr = Flipped(Valid(UInt(VAddrBits.W)))
    }
    val sramTest = new Bundle() {
      val mbist      = Option.when(hasMbist)(Input(new SramMbistBundle))
      val mbistReset = Option.when(hasMbist)(Input(new DFTResetSignals()))
      val sramCtl    = Option.when(hasSramCtl)(Input(UInt(64.W)))
    }
  })

  // decoupled-frontend modules
  val instrUncache = outer.instrUncache.module
  val icache       = outer.icache.module
  val bpu          = Module(new Predictor)
  val ifu          = Module(new NewIFU)
  val ibuffer      = Module(new IBuffer)
  val ftq          = Module(new Ftq)
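
  // Fetch pipeline overview: the BPU writes predicted fetch targets into the FTQ; the FTQ drives both
  // the IFU fetch requests and the ICache fetch/prefetch paths; the IFU fills the IBuffer, which in
  // turn feeds the backend decode stage.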

  val needFlush            = RegNext(io.backend.toFtq.redirect.valid)
  val FlushControlRedirect = RegNext(io.backend.toFtq.redirect.bits.debugIsCtrl)
  val FlushMemVioRedirect  = RegNext(io.backend.toFtq.redirect.bits.debugIsMemVio)
  val FlushControlBTBMiss  = Wire(Bool())
  val FlushTAGEMiss        = Wire(Bool())
  val FlushSCMiss          = Wire(Bool())
  val FlushITTAGEMiss      = Wire(Bool())
  val FlushRASMiss         = Wire(Bool())

  val tlbCsr  = DelayN(io.tlbCsr, 2)
  val csrCtrl = DelayN(io.csrCtrl, 2)
  val sfence  = RegNext(RegNext(io.sfence))

  // trigger
  ifu.io.frontendTrigger := csrCtrl.frontend_trigger

  // RVCDecoder fsIsOff
  ifu.io.csr_fsIsOff := csrCtrl.fsIsOff

  // bpu ctrl
  bpu.io.ctrl         := csrCtrl.bp_ctrl
  bpu.io.reset_vector := io.reset_vector

  // pmp
  val PortNumber = ICacheParameters().PortNumber
  val pmp        = Module(new PMP())
  val pmp_check  = VecInit(Seq.fill(coreParams.ipmpPortNum)(Module(new PMPChecker(3, sameCycle = true)).io))
  pmp.io.distribute_csr := csrCtrl.distribute_csr
  val pmp_req_vec = Wire(Vec(coreParams.ipmpPortNum, Valid(new PMPReqBundle())))
  (0 until 2 * PortNumber).foreach(i => pmp_req_vec(i) <> icache.io.pmp(i).req)
  pmp_req_vec.last <> ifu.io.pmp.req
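  // PMP/PMA port mapping: the first 2 * PortNumber checker ports are connected to the ICache, the last
  // one to the IFU.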

  for (i <- pmp_check.indices) {
    if (HasBitmapCheck) {
      pmp_check(i).apply(tlbCsr.mbmc.CMODE.asBool, tlbCsr.priv.imode, pmp.io.pmp, pmp.io.pma, pmp_req_vec(i))
    } else {
      pmp_check(i).apply(tlbCsr.priv.imode, pmp.io.pmp, pmp.io.pma, pmp_req_vec(i))
    }
  }
  (0 until 2 * PortNumber).foreach(i => icache.io.pmp(i).resp <> pmp_check(i).resp)
  ifu.io.pmp.resp <> pmp_check.last.resp

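  // ITLB: one port per ICache requestor plus one for the IFU; the trailing `true` in the port
  // configuration marks the IFU port as blocking (an MMIO fetch may need re-translation), while the
  // ICache ports are non-blocking.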
  val itlb =
    Module(new TLB(coreParams.itlbPortNum, nRespDups = 1, Seq.fill(PortNumber)(false) ++ Seq(true), itlbParams))
  itlb.io.requestor.take(PortNumber) zip icache.io.itlb foreach { case (a, b) => a <> b }
  itlb.io.requestor.last <> ifu.io.iTLBInter // mmio may need re-tlb, blocked
  itlb.io.hartId := io.hartId
  itlb.io.base_connect(sfence, tlbCsr)
  itlb.io.flushPipe.foreach(_ := icache.io.itlbFlushPipe)
  itlb.io.redirect := DontCare // itlb has flushpipe, don't need redirect signal

  val itlb_ptw = Wire(new VectorTlbPtwIO(coreParams.itlbPortNum))
  itlb_ptw.connect(itlb.io.ptw)
  val itlbRepeater1 = PTWFilter(itlbParams.fenceDelay, itlb_ptw, sfence, tlbCsr, l2tlbParams.ifilterSize)
  val itlbRepeater2 =
    PTWRepeaterNB(passReady = false, itlbParams.fenceDelay, itlbRepeater1.io.ptw, io.ptw, sfence, tlbCsr)
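  // The ITLB's page-table-walk requests go through a PTWFilter (itlbRepeater1) and then a non-blocking
  // PTWRepeater (itlbRepeater2) before reaching io.ptw.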

  icache.io.ftqPrefetch <> ftq.io.toPrefetch
  icache.io.softPrefetch <> io.softPrefetch

  // IFU-Ftq
  ifu.io.ftqInter.fromFtq <> ftq.io.toIfu
  ftq.io.toIfu.req.ready := ifu.io.ftqInter.fromFtq.req.ready && icache.io.fetch.req.ready

  ftq.io.fromIfu <> ifu.io.ftqInter.toFtq
  bpu.io.ftq_to_bpu <> ftq.io.toBpu
  ftq.io.fromBpu <> bpu.io.bpu_to_ftq

  ftq.io.mmioCommitRead <> ifu.io.mmioCommitRead

  // IFU-ICache
  icache.io.fetch.req <> ftq.io.toICache.req
  ftq.io.toICache.req.ready := ifu.io.ftqInter.fromFtq.req.ready && icache.io.fetch.req.ready
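  // A fetch request from the FTQ is accepted only when both the IFU and the ICache can take it,
  // keeping the two consumers of the same request in lock-step.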

  ifu.io.icacheInter.resp <> icache.io.fetch.resp
  ifu.io.icacheInter.icacheReady       := icache.io.toIFU
  ifu.io.icacheInter.topdownIcacheMiss := icache.io.fetch.topdownIcacheMiss
  ifu.io.icacheInter.topdownItlbMiss   := icache.io.fetch.topdownItlbMiss
  icache.io.stop                       := ifu.io.icacheStop
  icache.io.flush                      := ftq.io.icacheFlush

  ifu.io.icachePerfInfo := icache.io.perfInfo

  icache.io.csr_pf_enable := RegNext(csrCtrl.pf_ctrl.l1I_pf_enable)

  icache.io.fencei := RegNext(io.fencei)

  // IFU-Ibuffer
  ifu.io.toIbuffer <> ibuffer.io.in

  ftq.io.fromBackend <> io.backend.toFtq
  io.backend.fromFtq := ftq.io.toBackend
  io.backend.fromIfu := ifu.io.toBackend
  io.frontendInfo.bpuInfo <> ftq.io.bpuInfo

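  // The checkers below keep a shadow copy of the FTQ PC RAM (checkPcMem) and recover, for each
  // instruction leaving the IBuffer, its FTQ pointer (checkTargetPtr) and the start address of the
  // following FTQ entry (checkTarget); they are used only by the XSError assertions that follow.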
  val checkPcMem = Reg(Vec(FtqSize, new Ftq_RF_Components))
  when(ftq.io.toBackend.pc_mem_wen) {
    checkPcMem(ftq.io.toBackend.pc_mem_waddr) := ftq.io.toBackend.pc_mem_wdata
  }

  val checkTargetPtr = Wire(Vec(DecodeWidth, new FtqPtr))
  val checkTarget    = Wire(Vec(DecodeWidth, UInt(VAddrBits.W)))

  for (i <- 0 until DecodeWidth) {
    checkTargetPtr(i) := ibuffer.io.out(i).bits.ftqPtr
    checkTarget(i) := Mux(
      ftq.io.toBackend.newest_entry_ptr.value === checkTargetPtr(i).value,
      ftq.io.toBackend.newest_entry_target,
      checkPcMem((checkTargetPtr(i) + 1.U).value).startAddr
    )
  }

  // checkNotTakenConsecutive is not invoked below: a not-taken br may be the last instruction in its
  // fetch block, in which case the next instruction legally comes from a different FTQ entry
  def checkNotTakenConsecutive = {
    val prevNotTakenValid  = RegInit(0.B)
    val prevNotTakenFtqPtr = Reg(new FtqPtr)
    for (i <- 0 until DecodeWidth - 1) {
      // for instrs that are not the last: if it is a not-taken br, the next instr should have the same ftqPtr
      // for the last instr: record it and check against the next request
      when(ibuffer.io.out(i).fire && ibuffer.io.out(i).bits.pd.isBr) {
        when(ibuffer.io.out(i + 1).fire) {
          // not last br, check now
        }.otherwise {
          // last br, record its info
          prevNotTakenValid  := true.B
          prevNotTakenFtqPtr := checkTargetPtr(i)
        }
      }
      XSError(
        ibuffer.io.out(i).fire && ibuffer.io.out(i).bits.pd.isBr &&
          ibuffer.io.out(i + 1).fire &&
          checkTargetPtr(i).value =/= checkTargetPtr(i + 1).value,
        "not-taken br should have same ftqPtr\n"
      )
    }
    when(ibuffer.io.out(DecodeWidth - 1).fire && ibuffer.io.out(DecodeWidth - 1).bits.pd.isBr) {
      // last instr is a br, record its info
      prevNotTakenValid  := true.B
      prevNotTakenFtqPtr := checkTargetPtr(DecodeWidth - 1)
    }
    when(prevNotTakenValid && ibuffer.io.out(0).fire) {
      prevNotTakenValid := false.B
    }
    XSError(
      prevNotTakenValid && ibuffer.io.out(0).fire &&
        prevNotTakenFtqPtr.value =/= checkTargetPtr(0).value,
      "not-taken br should have same ftqPtr\n"
    )

    when(needFlush) {
      prevNotTakenValid := false.B
    }
  }

  def checkTakenNotConsecutive = {
    val prevTakenValid  = RegInit(0.B)
    val prevTakenFtqPtr = Reg(new FtqPtr)
    for (i <- 0 until DecodeWidth - 1) {
      // for instrs that are not the last: if it is a taken br, the next instr should not have the same ftqPtr
      // for the last instr: record it and check against the next request
      when(ibuffer.io.out(i).fire && ibuffer.io.out(i).bits.pd.isBr && ibuffer.io.out(i).bits.pred_taken) {
        when(ibuffer.io.out(i + 1).fire) {
          // not last br, check now
        }.otherwise {
          // last br, record its info
          prevTakenValid  := true.B
          prevTakenFtqPtr := checkTargetPtr(i)
        }
      }
      XSError(
        ibuffer.io.out(i).fire && ibuffer.io.out(i).bits.pd.isBr && ibuffer.io.out(i).bits.pred_taken &&
          ibuffer.io.out(i + 1).fire &&
          (checkTargetPtr(i) + 1.U).value =/= checkTargetPtr(i + 1).value,
        "taken br should have consecutive ftqPtr\n"
      )
    }
    when(ibuffer.io.out(DecodeWidth - 1).fire && ibuffer.io.out(DecodeWidth - 1).bits.pd.isBr && ibuffer.io.out(
      DecodeWidth - 1
    ).bits.pred_taken) {
      // last instr is a br, record its info
      prevTakenValid  := true.B
      prevTakenFtqPtr := checkTargetPtr(DecodeWidth - 1)
    }
    when(prevTakenValid && ibuffer.io.out(0).fire) {
      prevTakenValid := false.B
    }
    XSError(
      prevTakenValid && ibuffer.io.out(0).fire &&
        (prevTakenFtqPtr + 1.U).value =/= checkTargetPtr(0).value,
      "taken br should have consecutive ftqPtr\n"
    )
    when(needFlush) {
      prevTakenValid := false.B
    }
  }

  def checkNotTakenPC = {
    val prevNotTakenPC    = Reg(UInt(VAddrBits.W))
    val prevIsRVC         = Reg(Bool())
    val prevNotTakenValid = RegInit(0.B)

    for (i <- 0 until DecodeWidth - 1) {
      when(ibuffer.io.out(i).fire && ibuffer.io.out(i).bits.pd.isBr && !ibuffer.io.out(i).bits.pred_taken) {
        when(ibuffer.io.out(i + 1).fire) {}.otherwise {
          prevNotTakenValid := true.B
          prevIsRVC         := ibuffer.io.out(i).bits.pd.isRVC
          prevNotTakenPC    := ibuffer.io.out(i).bits.pc
        }
      }
      XSError(
        ibuffer.io.out(i).fire && ibuffer.io.out(i).bits.pd.isBr && !ibuffer.io.out(i).bits.pred_taken &&
          ibuffer.io.out(i + 1).fire &&
          ibuffer.io.out(i).bits.pc + Mux(ibuffer.io.out(i).bits.pd.isRVC, 2.U, 4.U) =/= ibuffer.io.out(
            i + 1
          ).bits.pc,
        "not-taken br should have consecutive pc\n"
      )
    }
    when(ibuffer.io.out(DecodeWidth - 1).fire && ibuffer.io.out(DecodeWidth - 1).bits.pd.isBr && !ibuffer.io.out(
      DecodeWidth - 1
    ).bits.pred_taken) {
      prevNotTakenValid := true.B
      prevIsRVC         := ibuffer.io.out(DecodeWidth - 1).bits.pd.isRVC
      prevNotTakenPC    := ibuffer.io.out(DecodeWidth - 1).bits.pc
    }
    when(prevNotTakenValid && ibuffer.io.out(0).fire) {
      prevNotTakenValid := false.B
    }
    XSError(
      prevNotTakenValid && ibuffer.io.out(0).fire &&
        prevNotTakenPC + Mux(prevIsRVC, 2.U, 4.U) =/= ibuffer.io.out(0).bits.pc,
      "not-taken br should have consecutive pc\n"
    )
    when(needFlush) {
      prevNotTakenValid := false.B
    }
  }

  def checkTakenPC = {
    val prevTakenFtqPtr = Reg(new FtqPtr)
    val prevTakenValid  = RegInit(0.B)
    val prevTakenTarget = Wire(UInt(VAddrBits.W))
    prevTakenTarget := checkPcMem((prevTakenFtqPtr + 1.U).value).startAddr

    for (i <- 0 until DecodeWidth - 1) {
      when(ibuffer.io.out(i).fire && !ibuffer.io.out(i).bits.pd.notCFI && ibuffer.io.out(i).bits.pred_taken) {
        when(ibuffer.io.out(i + 1).fire) {}.otherwise {
          prevTakenValid  := true.B
          prevTakenFtqPtr := checkTargetPtr(i)
        }
      }
      XSError(
        ibuffer.io.out(i).fire && !ibuffer.io.out(i).bits.pd.notCFI && ibuffer.io.out(i).bits.pred_taken &&
          ibuffer.io.out(i + 1).fire &&
          checkTarget(i) =/= ibuffer.io.out(i + 1).bits.pc,
        "taken instr should follow target pc\n"
      )
    }
    when(ibuffer.io.out(DecodeWidth - 1).fire && !ibuffer.io.out(DecodeWidth - 1).bits.pd.notCFI && ibuffer.io.out(
      DecodeWidth - 1
    ).bits.pred_taken) {
      prevTakenValid  := true.B
      prevTakenFtqPtr := checkTargetPtr(DecodeWidth - 1)
    }
    when(prevTakenValid && ibuffer.io.out(0).fire) {
      prevTakenValid := false.B
    }
    XSError(
      prevTakenValid && ibuffer.io.out(0).fire &&
        prevTakenTarget =/= ibuffer.io.out(0).bits.pc,
      "taken instr should follow target pc\n"
    )
    when(needFlush) {
      prevTakenValid := false.B
    }
  }

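  // Sanity checks on the IBuffer output: a predicted-taken branch must be followed by the next FTQ
  // entry (checkTakenNotConsecutive) and by the predicted target (checkTakenPC); a not-taken branch
  // must be followed by the sequentially next PC (checkNotTakenPC). checkNotTakenConsecutive stays
  // commented out (see the note at its definition).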
  // checkNotTakenConsecutive
  checkTakenNotConsecutive
  checkTakenPC
  checkNotTakenPC

  ifu.io.rob_commits <> io.backend.toFtq.rob_commits

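  // Flush/redirect bookkeeping for the IBuffer: the redirect kind and the per-predictor miss bubbles
  // reported by the FTQ are forwarded for top-down stall-reason accounting.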
  ibuffer.io.flush                := needFlush
  ibuffer.io.ControlRedirect      := FlushControlRedirect
  ibuffer.io.MemVioRedirect       := FlushMemVioRedirect
  ibuffer.io.ControlBTBMissBubble := FlushControlBTBMiss
  ibuffer.io.TAGEMissBubble       := FlushTAGEMiss
  ibuffer.io.SCMissBubble         := FlushSCMiss
  ibuffer.io.ITTAGEMissBubble     := FlushITTAGEMiss
  ibuffer.io.RASMissBubble        := FlushRASMiss
  ibuffer.io.decodeCanAccept      := io.backend.canAccept

  FlushControlBTBMiss := ftq.io.ControlBTBMissBubble
  FlushTAGEMiss       := ftq.io.TAGEMissBubble
  FlushSCMiss         := ftq.io.SCMissBubble
  FlushITTAGEMiss     := ftq.io.ITTAGEMissBubble
  FlushRASMiss        := ftq.io.RASMissBubble

  io.backend.cfVec <> ibuffer.io.out
  io.backend.stallReason <> ibuffer.io.stallReason

  instrUncache.io.req <> ifu.io.uncacheInter.toUncache
  ifu.io.uncacheInter.fromUncache <> instrUncache.io.resp
  instrUncache.io.flush := false.B
  io.error <> RegNext(RegNext(icache.io.error))

  icache.io.hartId := io.hartId

  itlbRepeater1.io.debugTopDown.robHeadVaddr := io.debugTopDown.robHeadVaddr

  io.frontendInfo.ibufFull := RegNext(ibuffer.io.full)
  io.resetInFrontend       := reset.asBool

  // PFEvent
  val pfevent = Module(new PFEvent)
  pfevent.io.distribute_csr := io.csrCtrl.distribute_csr
  val csrevents = pfevent.io.hpmevent.take(8)

  val perfFromUnits = Seq(ifu, ibuffer, icache, ftq, bpu).flatMap(_.getPerfEvents)
  val perfFromIO    = Seq()
  val perfBlock     = Seq()
  // let index = 0 be no event
  val allPerfEvents = Seq(("noEvent", 0.U)) ++ perfFromUnits ++ perfFromIO ++ perfBlock

  if (printEventCoding) {
    for (((name, inc), i) <- allPerfEvents.zipWithIndex) {
      println("Frontend perfEvents Set", name, inc, i)
    }
  }

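  // HPerfMonitor selects among the collected increment signals according to the eight hpmevent
  // selectors distributed through the PFEvent module; selector value 0 ("noEvent") leaves a counter idle.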
  val allPerfInc          = allPerfEvents.map(_._2.asTypeOf(new PerfEvent))
  override val perfEvents = HPerfMonitor(csrevents, allPerfInc).getPerfEvents
  generatePerfEvent()

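  // DFT hookup: an MBIST pipeline node is placed over the frontend SRAMs and exposed through a single
  // MbistInterface; the broadcast bundle generated by SramHelper fans the mbist / sramCtl signals out
  // to the SRAM instances in the frontend.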
  private val mbistPl = MbistPipeline.PlaceMbistPipeline(Int.MaxValue, "MbistPipeFrontend", hasMbist)
  private val mbistIntf = if (hasMbist) {
    val params = mbistPl.get.nodeParams
    val intf = Some(Module(new MbistInterface(
      params = Seq(params),
      ids = Seq(mbistPl.get.childrenIds),
      name = s"MbistIntfFrontend",
      pipelineNum = 1
    )))
    intf.get.toPipeline.head <> mbistPl.get.mbist
    mbistPl.get.registerCSV(intf.get.info, "MbistFrontend")
    intf.get.mbist := DontCare
    dontTouch(intf.get.mbist)
    // TODO: add mbist controller connections here
    intf
  } else {
    None
  }
  private val sigFromSrams = if (hasSramTest) Some(SramHelper.genBroadCastBundleTop()) else None
  private val cg           = ClockGate.genTeSrc
  dontTouch(cg)

  sigFromSrams.foreach { case sig => sig.mbist := DontCare }
  if (hasMbist) {
    sigFromSrams.get.mbist := io.sramTest.mbist.get
    cg.cgen                := io.sramTest.mbist.get.cgen
  } else {
    cg.cgen := false.B
  }

  sigFromSrams.foreach { case sig => sig.sramCtl := DontCare }
  if (hasSramCtl) {
    val sramCtlBundle = io.sramTest.sramCtl.get.asTypeOf(new SramCtlBundle)
    sigFromSrams.get.sramCtl.MCR := sramCtlBundle.MCR // CFG[5 : 4]
    sigFromSrams.get.sramCtl.MCW := sramCtlBundle.MCW // CFG[7 : 6]
    sigFromSrams.get.sramCtl.RCT := sramCtlBundle.RCT // CFG[35 : 34]
    sigFromSrams.get.sramCtl.WCT := sramCtlBundle.WCT // CFG[37 : 36]
    sigFromSrams.get.sramCtl.KP  := sramCtlBundle.KP  // CFG[40 : 38]
  }
}