/***************************************************************************************
 * Copyright (c) 2024 Beijing Institute of Open Source Chip (BOSC)
 * Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
 * Copyright (c) 2020-2021 Peng Cheng Laboratory
 *
 * XiangShan is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *          http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 *
 * See the Mulan PSL v2 for more details.
 ***************************************************************************************/
package xiangshan.mem

import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import xiangshan._
import xiangshan.backend.rob.{RobPtr, RobLsqIO}
import xiangshan.ExceptionNO._
import xiangshan.cache._
import utils._
import utility._
import xiangshan.backend.Bundles
import xiangshan.backend.Bundles.{DynInst, MemExuOutput}
import xiangshan.backend.fu.FuConfig.LduCfg
import xiangshan.backend.HasMemBlockParameters

class UncacheEntry(entryIndex: Int)(implicit p: Parameters) extends XSModule
  with HasCircularQueuePtrHelper
  with HasLoadHelper
{
  val io = IO(new Bundle() {
    /* control */
    val redirect = Flipped(Valid(new Redirect))
    // redirect flush
    val flush = Output(Bool())
    // mmio commit
    val rob = Flipped(new RobLsqIO)
    // mmio select
    val mmioSelect = Output(Bool())

    /* transaction */
    // from ldu
    val req = Flipped(Valid(new LqWriteBundle))
    // to ldu: mmio, data
    val mmioOut = DecoupledIO(new MemExuOutput)
    val mmioRawData = Output(new LoadDataFromLQBundle)
    // to ldu: nc with data
    val ncOut = DecoupledIO(new LsPipelineBundle)
    // <=> uncache
    val uncache = new UncacheWordIO
    // exception generated by outer bus
    val exception = Valid(new LqWriteBundle)
  })

  val req_valid = RegInit(false.B)
  val isNC = RegInit(false.B)
  val req = Reg(new LqWriteBundle)

  val s_idle :: s_req :: s_resp :: s_wait :: Nil = Enum(4)
  val uncacheState = RegInit(s_idle)
  val uncacheData = Reg(io.uncache.resp.bits.data.cloneType)
  val nderr = RegInit(false.B)

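  // "writeback" marks the cycle in which the load unit accepts this entry's result:
  // on the ncOut port for non-cacheable (NC) loads, on the mmioOut port for MMIO loads.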
  val writeback = Mux(req.nc, io.ncOut.fire, io.mmioOut.fire)

  /**
    * Flush
    *
    * 1. if the entry is still idle, flush it immediately
    * 2. otherwise, delay the flush until the uncache resp is received
    */
  val needFlushReg = RegInit(false.B)
  val needFlush = req_valid && req.uop.robIdx.needFlush(io.redirect)
  val flush = (needFlush && uncacheState===s_idle) || (io.uncache.resp.fire && needFlushReg)
  when(flush){
    needFlushReg := false.B
  }.elsewhen(needFlush){
    needFlushReg := true.B
  }

  /* enter req */
  when (flush) {
    req_valid := false.B
  } .elsewhen (io.req.valid) {
    req_valid := true.B
    req := io.req.bits
    nderr := false.B
  } .elsewhen (writeback) {
    req_valid := false.B
  }
  XSError(!flush && io.req.valid && req_valid, p"LoadQueueUncache: You cannot write a valid entry: $entryIndex")

  /**
    * Memory mapped IO / NC operations
    *
    * States:
    * (1) s_idle: wait for the MMIO load to reach the ROB head, or for a valid NC req from the load unit
    * (2) s_req: wait until this entry is selected and the uncache channel accepts the req
    * (3) s_resp: wait for the response from the uncache channel
    * (4) s_wait: wait for the load unit to accept the writeback req
    */
  val pendingld = GatedValidRegNext(io.rob.pendingMMIOld)
  val pendingPtr = GatedRegNext(io.rob.pendingPtr)
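  // An MMIO load may issue only when it is the pending load at the ROB head (non-speculative);
  // an NC load may issue as soon as the entry holds a valid, un-flushed request.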
  val canSendReq = req_valid && !needFlush && Mux(
    req.nc, true.B,
    pendingld && req.uop.robIdx === pendingPtr
  )
  switch (uncacheState) {
    is (s_idle) {
      when (canSendReq) {
        uncacheState := s_req
      }
    }
    is (s_req) {
      when (io.uncache.req.fire) {
        uncacheState := s_resp
      }
    }
    is (s_resp) {
      when (io.uncache.resp.fire) {
        when (needFlushReg) {
          uncacheState := s_idle
        }.otherwise{
          uncacheState := s_wait
        }
      }
    }
    is (s_wait) {
      when (writeback) {
        uncacheState := s_idle
      }
    }
  }

  /* control */
  io.flush := flush
  io.rob.mmio := DontCare
  io.rob.uop := DontCare
  io.mmioSelect := (uncacheState =/= s_idle) && req.mmio

  /* uncache req */
  io.uncache.req.valid     := uncacheState === s_req
  io.uncache.req.bits      := DontCare
  io.uncache.req.bits.cmd  := MemoryOpConstants.M_XRD
  io.uncache.req.bits.data := DontCare
  io.uncache.req.bits.addr := req.paddr
  io.uncache.req.bits.vaddr:= req.vaddr
  io.uncache.req.bits.mask := Mux(req.paddr(3), req.mask(15, 8), req.mask(7, 0))
  io.uncache.req.bits.id   := entryIndex.U
  io.uncache.req.bits.instrtype := DontCare
  io.uncache.req.bits.replayCarry := DontCare
  io.uncache.req.bits.atomic := req.atomic
  io.uncache.req.bits.nc := req.nc
  io.uncache.req.bits.memBackTypeMM := req.memBackTypeMM

  io.uncache.resp.ready := true.B

  /* uncache resp */
  when (io.uncache.resp.fire) {
    uncacheData := io.uncache.resp.bits.data
    nderr := io.uncache.resp.bits.nderr
  }

  /* uncache writeback */
  val selUop = req.uop
  val func = selUop.fuOpType
  val raddr = req.paddr
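  // Right-shift the 64-bit uncache data so that the addressed byte sits at bit 0;
  // rdataHelper then sign-/zero-extends it according to the load's fuOpType.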
  val rdataSel = LookupTree(raddr(2, 0), List(
      "b000".U -> uncacheData(63,  0),
      "b001".U -> uncacheData(63,  8),
      "b010".U -> uncacheData(63, 16),
      "b011".U -> uncacheData(63, 24),
      "b100".U -> uncacheData(63, 32),
      "b101".U -> uncacheData(63, 40),
      "b110".U -> uncacheData(63, 48),
      "b111".U -> uncacheData(63, 56)
    ))
  val rdataPartialLoad = rdataHelper(selUop, rdataSel)

  io.mmioOut.valid := false.B
  io.mmioOut.bits := DontCare
  io.mmioRawData := DontCare
  io.ncOut.valid := false.B
  io.ncOut.bits := DontCare

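  // Writeback: NC loads return into the load pipeline via ncOut; MMIO loads return via mmioOut
  // together with the raw data (mmioRawData) that the load unit uses for data selection.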
  when(req.nc){
    io.ncOut.valid := (uncacheState === s_wait)
    io.ncOut.bits := DontCare
    io.ncOut.bits.uop := selUop
    io.ncOut.bits.uop.lqIdx := req.uop.lqIdx
    io.ncOut.bits.uop.exceptionVec(loadAccessFault) := nderr
    io.ncOut.bits.data := rdataPartialLoad
    io.ncOut.bits.paddr := req.paddr
    io.ncOut.bits.vaddr := req.vaddr
    io.ncOut.bits.nc := true.B
    io.ncOut.bits.mask := Mux(req.paddr(3), req.mask(15, 8), req.mask(7, 0))
    io.ncOut.bits.schedIndex := req.schedIndex
    io.ncOut.bits.isvec := req.isvec
    io.ncOut.bits.is128bit := req.is128bit
    io.ncOut.bits.vecActive := req.vecActive
  }.otherwise{
    io.mmioOut.valid := (uncacheState === s_wait)
    io.mmioOut.bits := DontCare
    io.mmioOut.bits.uop := selUop
    io.mmioOut.bits.uop.lqIdx := req.uop.lqIdx
    io.mmioOut.bits.uop.exceptionVec(loadAccessFault) := nderr
    io.mmioOut.bits.data := rdataPartialLoad
    io.mmioOut.bits.debug.isMMIO := true.B
    io.mmioOut.bits.debug.isNC := false.B
    io.mmioOut.bits.debug.paddr := req.paddr
    io.mmioOut.bits.debug.vaddr := req.vaddr
    io.mmioRawData.lqData := uncacheData
    io.mmioRawData.uop := req.uop
    io.mmioRawData.addrOffset := req.paddr
  }

  io.exception.valid := writeback
  io.exception.bits := req
  io.exception.bits.uop.exceptionVec(loadAccessFault) := nderr

  /* debug log */
  XSDebug(io.uncache.req.fire,
    "uncache req: pc %x addr %x data %x op %x mask %x\n",
    req.uop.pc,
    io.uncache.req.bits.addr,
    io.uncache.req.bits.data,
    io.uncache.req.bits.cmd,
    io.uncache.req.bits.mask
  )
  XSInfo(io.ncOut.fire,
    "int load miss write to cbd robidx %d lqidx %d pc 0x%x mmio %x\n",
    io.ncOut.bits.uop.robIdx.asUInt,
    io.ncOut.bits.uop.lqIdx.asUInt,
    io.ncOut.bits.uop.pc,
    true.B
  )
  XSInfo(io.mmioOut.fire,
    "int load miss write to cbd robidx %d lqidx %d pc 0x%x mmio %x\n",
    io.mmioOut.bits.uop.robIdx.asUInt,
    io.mmioOut.bits.uop.lqIdx.asUInt,
    io.mmioOut.bits.uop.pc,
    true.B
  )

}

class LoadQueueUncache(implicit p: Parameters) extends XSModule
  with HasCircularQueuePtrHelper
  with HasMemBlockParameters
{
  val io = IO(new Bundle() {
    /* control */
    val redirect = Flipped(Valid(new Redirect))
    // mmio commit
    val rob = Flipped(new RobLsqIO)

    /* transaction */
    // enqueue: from ldu s3
    val req = Vec(LoadPipelineWidth, Flipped(Decoupled(new LqWriteBundle)))
    // writeback: mmio to ldu s0, s3
    val mmioOut = Vec(LoadPipelineWidth, DecoupledIO(new MemExuOutput))
    val mmioRawData = Vec(LoadPipelineWidth, Output(new LoadDataFromLQBundle))
    // writeback: nc to ldu s0--s3
    val ncOut = Vec(LoadPipelineWidth, Decoupled(new LsPipelineBundle))
    // <=> uncache
    val uncache = new UncacheWordIO

    /* except */
    // rollback: redirect to re-fetch from the frontend when the buffer is full
    val rollback = Output(Valid(new Redirect))
    // exception generated by outer bus
    val exception = Valid(new LqWriteBundle)
  })

  /******************************************************************
   * Structure
   ******************************************************************/
  val entries = Seq.tabulate(LoadUncacheBufferSize)(i => Module(new UncacheEntry(i)))

  val freeList = Module(new FreeList(
    size = LoadUncacheBufferSize,
    allocWidth = LoadPipelineWidth,
    freeWidth = 4,
    enablePreAlloc = true,
    moduleName = "LoadQueueUncache freelist"
  ))
  freeList.io := DontCare

  // set default IO
  entries.foreach {
    case (e) =>
      e.io.req.valid := false.B
      e.io.req.bits := DontCare
      e.io.uncache.req.ready := false.B
      e.io.uncache.resp.valid := false.B
      e.io.uncache.resp.bits := DontCare
      e.io.ncOut.ready := false.B
      e.io.mmioOut.ready := false.B
  }
  io.uncache.req.valid := false.B
  io.uncache.req.bits := DontCare
  io.uncache.resp.ready := false.B
  for (w <- 0 until LoadPipelineWidth) {
    io.mmioOut(w).valid := false.B
    io.mmioOut(w).bits := DontCare
    io.mmioRawData(w) := DontCare
    io.ncOut(w).valid := false.B
    io.ncOut(w).bits := DontCare
  }


  /******************************************************************
   * Enqueue
   *
   * s1: hold
   * s2: confirm enqueue and write entry
   *    valid: no redirect, no exception, no replay, is mmio/nc
   *    ready: freelist can allocate
   ******************************************************************/

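  // Sort the incoming requests by robIdx so that older loads are considered first for allocation.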
  val s1_sortedVec = HwSort(VecInit(io.req.map { case x => DataWithPtr(x.valid, x.bits, x.bits.uop.robIdx) }))
  val s1_req = VecInit(s1_sortedVec.map(_.bits))
  val s1_valid = VecInit(s1_sortedVec.map(_.valid))
  val s2_enqueue = Wire(Vec(LoadPipelineWidth, Bool()))
  io.req.zipWithIndex.foreach{ case (r, i) =>
    r.ready := true.B
  }

  // s2: enqueue
  val s2_req = (0 until LoadPipelineWidth).map(i => {RegEnable(s1_req(i), s1_valid(i))})
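  // A request stays valid only if it is flushed by neither last cycle's nor this cycle's redirect.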
  val s2_valid = (0 until LoadPipelineWidth).map(i => {
    RegNext(s1_valid(i)) &&
    !s2_req(i).uop.robIdx.needFlush(RegNext(io.redirect)) &&
    !s2_req(i).uop.robIdx.needFlush(io.redirect)
  })
  val s2_has_exception = s2_req.map(x => ExceptionNO.selectByFu(x.uop.exceptionVec, LduCfg).asUInt.orR)
  val s2_need_replay = s2_req.map(_.rep_info.need_rep)

  for (w <- 0 until LoadPipelineWidth) {
    s2_enqueue(w) := s2_valid(w) && !s2_has_exception(w) && !s2_need_replay(w) && (s2_req(w).mmio || s2_req(w).nc)
  }

  val s2_enqValidVec = Wire(Vec(LoadPipelineWidth, Bool()))
  val s2_enqIndexVec = Wire(Vec(LoadPipelineWidth, UInt()))

  for (w <- 0 until LoadPipelineWidth) {
    freeList.io.allocateReq(w) := true.B
  }

  // freeList real-allocate
  for (w <- 0 until LoadPipelineWidth) {
    freeList.io.doAllocate(w) := s2_enqValidVec(w)
  }

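  // Allocate in (sorted) request order: the w-th enqueuing request takes the free slot at
  // offset = number of enqueuing requests ahead of it this cycle.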
  for (w <- 0 until LoadPipelineWidth) {
    val offset = PopCount(s2_enqueue.take(w))
    s2_enqValidVec(w) := s2_enqueue(w) && freeList.io.canAllocate(offset)
    s2_enqIndexVec(w) := freeList.io.allocateSlot(offset)
  }


  /******************************************************************
   * Uncache Transaction
   *
   * 1. uncache req
   * 2. uncache resp
   * 3. writeback
   ******************************************************************/
  private val NC_WB_MOD = NCWBPorts.length

  val uncacheReq = Wire(DecoupledIO(io.uncache.req.bits.cloneType))
  val mmioSelect = entries.map(e => e.io.mmioSelect).reduce(_ || _)
  val mmioReq = Wire(DecoupledIO(io.uncache.req.bits.cloneType))
  // TODO lyq: It's best to choose in robIdx order / the order in which they enter
  val ncReqArb = Module(new RRArbiterInit(io.uncache.req.bits.cloneType, LoadUncacheBufferSize))

  val mmioOut = Wire(DecoupledIO(io.mmioOut(0).bits.cloneType))
  val mmioRawData = Wire(io.mmioRawData(0).cloneType)
  val ncOut = Wire(chiselTypeOf(io.ncOut))
  val ncOutValidVec = VecInit(entries.map(e => e.io.ncOut.valid))
  val ncOutValidVecRem = SubVec.getMaskRem(ncOutValidVec, NC_WB_MOD)

  // init
  uncacheReq.valid := false.B
  uncacheReq.bits  := DontCare
  mmioReq.valid := false.B
  mmioReq.bits := DontCare
  mmioOut.valid := false.B
  mmioOut.bits := DontCare
  mmioRawData := DontCare
  for (i <- 0 until LoadUncacheBufferSize) {
    ncReqArb.io.in(i).valid := false.B
    ncReqArb.io.in(i).bits := DontCare
  }
  for (i <- 0 until LoadPipelineWidth) {
    ncOut(i).valid := false.B
    ncOut(i).bits := DontCare
  }

  entries.zipWithIndex.foreach {
    case (e, i) =>
      // enqueue
      for (w <- 0 until LoadPipelineWidth) {
        when (s2_enqValidVec(w) && (i.U === s2_enqIndexVec(w))) {
          e.io.req.valid := true.B
          e.io.req.bits := s2_req(w)
        }
      }

      // control
      e.io.redirect <> io.redirect
      e.io.rob <> io.rob

      // uncache req, writeback
      when (e.io.mmioSelect) {
        mmioReq.valid := e.io.uncache.req.valid
        mmioReq.bits := e.io.uncache.req.bits
        e.io.uncache.req.ready := mmioReq.ready

        e.io.mmioOut.ready := mmioOut.ready
        mmioOut.valid := e.io.mmioOut.valid
        mmioOut.bits := e.io.mmioOut.bits
        mmioRawData := e.io.mmioRawData

      }.otherwise{
        ncReqArb.io.in(i).valid := e.io.uncache.req.valid
        ncReqArb.io.in(i).bits := e.io.uncache.req.bits
        e.io.uncache.req.ready := ncReqArb.io.in(i).ready

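        // NC writeback: entries are distributed over the NCWBPorts; entry i writes back on port w
        // when it is the highest-priority valid entry within the w-th remainder group of ncOutValidVecRem.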
        (0 until NC_WB_MOD).map { w =>
          val (idx, ncOutValid) = PriorityEncoderWithFlag(ncOutValidVecRem(w))
          val port = NCWBPorts(w)
          when((i.U === idx) && ncOutValid) {
            ncOut(port).valid := ncOutValid
            ncOut(port).bits := e.io.ncOut.bits
            e.io.ncOut.ready := ncOut(port).ready
          }
        }

      }

      // uncache resp
      when (i.U === io.uncache.resp.bits.id) {
        e.io.uncache.resp <> io.uncache.resp
      }

  }

  mmioReq.ready := false.B
  ncReqArb.io.out.ready := false.B
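  // At most one entry can be in MMIO select at a time (it must be the load at the ROB head);
  // when one is active it owns the uncache request channel, otherwise NC requests go through the round-robin arbiter.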
  when(mmioSelect){
    uncacheReq <> mmioReq
  }.otherwise{
    uncacheReq <> ncReqArb.io.out
  }

  // uncache Request
  AddPipelineReg(uncacheReq, io.uncache.req, false.B)

  // uncache Writeback
  AddPipelineReg(mmioOut, io.mmioOut(UncacheWBPort), false.B)
  io.mmioRawData(UncacheWBPort) := RegEnable(mmioRawData, mmioOut.fire)

  (0 until LoadPipelineWidth).foreach { i => AddPipelineReg(ncOut(i), io.ncOut(i), false.B) }

  // uncache exception
  io.exception.valid := Cat(entries.map(_.io.exception.valid)).orR
  io.exception.bits := ParallelPriorityMux(entries.map(e =>
    (e.io.exception.valid, e.io.exception.bits)
  ))

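  // Report which newly arrived loads are MMIO to the ROB (mmio flag + uop), so that MMIO commit
  // can be coordinated with the uncache access at the ROB head.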
  // rob
  for (i <- 0 until LoadPipelineWidth) {
    io.rob.mmio(i) := RegNext(s1_valid(i) && s1_req(i).mmio)
    io.rob.uop(i) := RegEnable(s1_req(i).uop, s1_valid(i))
  }


  /******************************************************************
   * Deallocate
   ******************************************************************/
  // UncacheBuffer deallocate
  val freeMaskVec = Wire(Vec(LoadUncacheBufferSize, Bool()))

  // init
  freeMaskVec.map(e => e := false.B)

  // dealloc logic
  entries.zipWithIndex.foreach {
    case (e, i) =>
      when ((e.io.mmioSelect && e.io.mmioOut.fire) || e.io.ncOut.fire || e.io.flush) {
        freeMaskVec(i) := true.B
      }
  }

  freeList.io.free := freeMaskVec.asUInt


  /******************************************************************
   * Uncache rollback detection
   *
   * When uncache loads try to enqueue but cannot be allocated (e.g. the buffer is full),
   * they must be rolled back and re-executed.
   *
   * Cycle 0: uncache enqueue.
   * Cycle 1: Select the oldest of the rejected uncache loads.
   * Cycle 2: Redirect fire.
   *   Choose the oldest load among the LoadPipelineWidth candidates.
   *   Prepare the redirect request according to the detected rejection.
   *   Fire the redirect request (if valid).
   *
   *               Load_S3  .... Load_S3
   * stage 0:        lq            lq
   *                 |             | (can not enqueue)
   * stage 1:        lq            lq
   *                 |             |
   *                 ---------------
   *                        |
   * stage 2:               lq
   *                        |
   *                     rollback req
   *
   ******************************************************************/
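  // Returns a one-hot vector marking the valid redirect whose robIdx is the oldest.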
  def selectOldestRedirect(xs: Seq[Valid[Redirect]]): Vec[Bool] = {
    val compareVec = (0 until xs.length).map(i => (0 until i).map(j => isAfter(xs(j).bits.robIdx, xs(i).bits.robIdx)))
    val resultOnehot = VecInit((0 until xs.length).map(i => Cat((0 until xs.length).map(j =>
      (if (j < i) !xs(j).valid || compareVec(i)(j)
      else if (j == i) xs(i).valid
      else !xs(j).valid || !compareVec(j)(i))
    )).andR))
    resultOnehot
  }
  val reqNeedCheck = VecInit((0 until LoadPipelineWidth).map(w =>
    s2_enqueue(w) && !s2_enqValidVec(w)
  ))
  val reqSelUops = VecInit(s2_req.map(_.uop))
  val allRedirect = (0 until LoadPipelineWidth).map(i => {
    val redirect = Wire(Valid(new Redirect))
    redirect.valid := reqNeedCheck(i)
    redirect.bits             := DontCare
    redirect.bits.isRVC       := reqSelUops(i).preDecodeInfo.isRVC
    redirect.bits.robIdx      := reqSelUops(i).robIdx
    redirect.bits.ftqIdx      := reqSelUops(i).ftqPtr
    redirect.bits.ftqOffset   := reqSelUops(i).ftqOffset
    redirect.bits.level       := RedirectLevel.flush
    redirect.bits.cfiUpdate.target := reqSelUops(i).pc // TODO: check if need pc
    redirect.bits.debug_runahead_checkpoint_id := reqSelUops(i).debugInfo.runahead_checkpoint_id
    redirect
  })
  val oldestOneHot = selectOldestRedirect(allRedirect)
  val oldestRedirect = Mux1H(oldestOneHot, allRedirect)
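  // Suppress the rollback if the chosen load has already been flushed by the current redirect
  // or by the redirects of the previous two cycles.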
  val lastCycleRedirect = Wire(Valid(new Redirect))
  lastCycleRedirect.valid := RegNext(io.redirect.valid)
  lastCycleRedirect.bits := RegEnable(io.redirect.bits, io.redirect.valid)
  val lastLastCycleRedirect = Wire(Valid(new Redirect))
  lastLastCycleRedirect.valid := RegNext(lastCycleRedirect.valid)
  lastLastCycleRedirect.bits := RegEnable(lastCycleRedirect.bits, lastCycleRedirect.valid)
  io.rollback.valid := GatedValidRegNext(oldestRedirect.valid &&
                      !oldestRedirect.bits.robIdx.needFlush(io.redirect) &&
                      !oldestRedirect.bits.robIdx.needFlush(lastCycleRedirect) &&
                      !oldestRedirect.bits.robIdx.needFlush(lastLastCycleRedirect))
  io.rollback.bits := RegEnable(oldestRedirect.bits, oldestRedirect.valid)


  /******************************************************************
   * Perf Counter
   ******************************************************************/
  val validCount = freeList.io.validCount
  val allowEnqueue = !freeList.io.empty
  QueuePerf(LoadUncacheBufferSize, validCount, !allowEnqueue)

  XSPerfAccumulate("mmio_uncache_req", io.uncache.req.fire && !io.uncache.req.bits.nc)
  XSPerfAccumulate("mmio_writeback_success", io.mmioOut(0).fire)
  XSPerfAccumulate("mmio_writeback_blocked", io.mmioOut(0).valid && !io.mmioOut(0).ready)
  XSPerfAccumulate("nc_uncache_req", io.uncache.req.fire && io.uncache.req.bits.nc)
  XSPerfAccumulate("nc_writeback_success", io.ncOut(0).fire)
  XSPerfAccumulate("nc_writeback_blocked", io.ncOut(0).valid && !io.ncOut(0).ready)
  XSPerfAccumulate("uncache_full_rollback", io.rollback.valid)

  val perfEvents: Seq[(String, UInt)] = Seq(
    ("mmio_uncache_req", io.uncache.req.fire && !io.uncache.req.bits.nc),
    ("mmio_writeback_success", io.mmioOut(0).fire),
    ("mmio_writeback_blocked", io.mmioOut(0).valid && !io.mmioOut(0).ready),
    ("nc_uncache_req", io.uncache.req.fire && io.uncache.req.bits.nc),
    ("nc_writeback_success", io.ncOut(0).fire),
    ("nc_writeback_blocked", io.ncOut(0).valid && !io.ncOut(0).ready),
    ("uncache_full_rollback", io.rollback.valid)
  )
  // end
}