/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import freechips.rocketchip.tilelink.TLPermissions._
import freechips.rocketchip.tilelink.{TLArbiter, TLBundleC, TLBundleD, TLEdgeOut}
import huancun.DirtyKey
import utils.{HasPerfEvents, HasTLDump, XSDebug, XSPerfAccumulate}

class WritebackReqWodata(implicit p: Parameters) extends DCacheBundle {
  val addr = UInt(PAddrBits.W)
  val addr_dup_0 = UInt(PAddrBits.W)
  val addr_dup_1 = UInt(PAddrBits.W)
  val param  = UInt(cWidth.W)
  val voluntary = Bool()
  val hasData = Bool()
  val dirty = Bool()

  val delay_release = Bool()
  val miss_id = UInt(log2Up(cfg.nMissEntries).W)

  def dump() = {
    XSDebug("WritebackReq addr: %x param: %d voluntary: %b hasData: %b\n",
      addr, param, voluntary, hasData)
  }
}

class WritebackReqData(implicit p: Parameters) extends DCacheBundle {
  val data = UInt((cfg.blockBytes * 8).W)
}

class WritebackReq(implicit p: Parameters) extends WritebackReqWodata {
  val data = UInt((cfg.blockBytes * 8).W)

  override def dump() = {
    XSDebug("WritebackReq addr: %x param: %d voluntary: %b hasData: %b data: %x\n",
      addr, param, voluntary, hasData, data)
  }

  def toWritebackReqWodata(): WritebackReqWodata = {
    val out = Wire(new WritebackReqWodata)
    out.addr := addr
    out.addr_dup_0 := addr_dup_0
    out.addr_dup_1 := addr_dup_1
    out.param := param
    out.voluntary := voluntary
    out.hasData := hasData
    out.dirty := dirty
    out.delay_release := delay_release
    out.miss_id := miss_id
    out
  }

  def toWritebackReqData(): WritebackReqData = {
    val out = Wire(new WritebackReqData)
    out.data := data
    out
  }
}

// While a Release sleeps and waits for a refill to wake it up,
// main pipe might update meta & data during this time.
// So the meta & data to be released need to be updated too.
class ReleaseUpdate(implicit p: Parameters) extends DCacheBundle {
  // only consider store here
  val addr = UInt(PAddrBits.W)
  val mask = UInt(DCacheBanks.W)
  val data = UInt((cfg.blockBytes * 8).W)
}

// To reduce fanout, miss queue entry data is updated 1 cycle
// after ReleaseUpdate.fire()
class MissQueueEntryReleaseUpdate(implicit p: Parameters) extends DCacheBundle {
  // only consider store here
  val addr = UInt(PAddrBits.W)
  val mask_delayed = UInt(DCacheBanks.W)
  val data_delayed = UInt((cfg.blockBytes * 8).W)
  val mask_orr = Bool()
}

class WritebackEntry(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheModule with HasTLDump
{
  val io = IO(new Bundle {
    val id = Input(UInt())
    // allocate this entry for new req
    val primary_valid = Input(Bool())
    // this entry is free and can be allocated to new reqs
    val primary_ready = Output(Bool())
    // this entry is busy, but it can merge the new req
    val secondary_valid = Input(Bool())
    val secondary_ready = Output(Bool())
    val req = Flipped(DecoupledIO(new WritebackReqWodata))
    val req_data = Input(new WritebackReqData)

    val mem_release = DecoupledIO(new TLBundleC(edge.bundle))
    val mem_grant = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))

    val block_addr  = Output(Valid(UInt()))

    val release_wakeup = Flipped(ValidIO(UInt(log2Up(cfg.nMissEntries).W)))
    val release_update = Flipped(ValidIO(new MissQueueEntryReleaseUpdate))
  })

  val s_invalid :: s_sleep :: s_release_req :: s_release_resp :: Nil = Enum(4)
  // ProbeAck:               s_invalid ->            s_release_req
  // ProbeAck merge Release: s_invalid ->            s_release_req
  // Release:                s_invalid -> s_sleep -> s_release_req -> s_release_resp
  // Release merge ProbeAck: s_invalid -> s_sleep -> s_release_req
  //                        (change Release into ProbeAck when Release is not fired)
  //                     or: s_invalid -> s_sleep -> s_release_req -> s_release_resp -> s_release_req
  //                        (send a ProbeAck after Release transaction is over)
  val state = RegInit(s_invalid)
  val state_dup_0 = RegInit(s_invalid)
  val state_dup_1 = RegInit(s_invalid)

  // internal regs
  // remaining beats
  val remain = RegInit(0.U(refillCycles.W))
  val remain_dup_0 = RegInit(0.U(refillCycles.W))
  val remain_dup_1 = RegInit(0.U(refillCycles.W))
  val remain_set = WireInit(0.U(refillCycles.W))
  val remain_clr = WireInit(0.U(refillCycles.W))
  remain := (remain | remain_set) & ~remain_clr
  remain_dup_0 := (remain_dup_0 | remain_set) & ~remain_clr
  remain_dup_1 := (remain_dup_1 | remain_set) & ~remain_clr
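
  // remain is a per-beat pending bitmap maintained with the usual set/clear register
  // idiom: bits raised via remain_set stay high until cleared via remain_clr.
  // Illustrative trace (assuming refillCycles = 8, i.e. 8 beats per block):
  //   allocate with hasData: remain_set = b11111111 -> remain = b11111111
  //   one beat fires:        remain_clr = b00000001 -> remain = b11111110
  //   ... and so on until all beats have been sent and remain returns to 0.
  // A dataless ProbeAck/Release is a single-beat message, hence the 1.U below.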

  // writeback queue data
  val data = Reg(UInt((cfg.blockBytes * 8).W))

  // pending data write
  // !s_data_override means there is an in-progress data write
  val s_data_override = RegInit(true.B)
  // !s_data_merge means there is an in-progress data merge
  val s_data_merge = RegInit(true.B)

  // there is a valid request that can be sent to the release bus
  val busy = remain.orR && s_data_override && s_data_merge // have remaining beats and data write finished

  val req  = Reg(new WritebackReqWodata)

  // assign default signals to output signals
  io.req.ready := false.B
  io.mem_release.valid := false.B
  io.mem_release.bits  := DontCare
  io.mem_grant.ready   := false.B
  io.block_addr.valid  := state =/= s_invalid
  io.block_addr.bits   := req.addr

  s_data_override := true.B // data_override takes only 1 cycle
  s_data_merge := true.B // data_merge takes only 1 cycle

  when (state =/= s_invalid) {
    XSDebug("WritebackEntry: %d state: %d block_addr: %x\n", io.id, state, io.block_addr.bits)
  }

  def mergeData(old_data: UInt, new_data: UInt, wmask: UInt): UInt = {
    val full_wmask = FillInterleaved(64, wmask)
    (~full_wmask & old_data | full_wmask & new_data)
  }
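
  // mergeData sketch: wmask is a per-bank store mask and each bank is 64 bits wide
  // (the hard-coded 64 above), so FillInterleaved(64, wmask) stretches every mask bit
  // across one bank of data. Illustrative values only: with wmask = "b00000010".U,
  // full_wmask sets bits 127..64, so bank 1 of the result is taken from new_data
  // while every other bank keeps old_data.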

  // --------------------------------------------------------------------------------
  // s_invalid: receive requests
  // new req entering
  when (io.req.valid && io.primary_valid && io.primary_ready) {
    assert (remain === 0.U)
    req := io.req.bits
    s_data_override := false.B
    when (io.req.bits.delay_release) {
      state := s_sleep
      state_dup_0 := s_sleep
      state_dup_1 := s_sleep
    }.otherwise {
      state := s_release_req
      state_dup_0 := s_release_req
      state_dup_1 := s_release_req
      remain_set := Mux(io.req.bits.hasData, ~0.U(refillCycles.W), 1.U(refillCycles.W))
    }
  }

  // --------------------------------------------------------------------------------
  // s_sleep: wait for refill pipe to inform me that I can keep releasing
  val merge = io.secondary_valid && io.secondary_ready
  when (state === s_sleep) {
    assert(remain === 0.U)
    // There shouldn't be a new Release with the same addr in sleep state
    assert(!(merge && io.req.bits.voluntary))

    val update = io.release_update.valid && io.release_update.bits.addr === req.addr
    when (update) {
      req.hasData := req.hasData || io.release_update.bits.mask_orr
      req.dirty := req.dirty || io.release_update.bits.mask_orr
      s_data_merge := false.B
    }.elsewhen (merge) {
      state := s_release_req
      state_dup_0 := s_release_req
      state_dup_1 := s_release_req
      req.voluntary := false.B
      req.param := req.param
      req.hasData := req.hasData || io.req.bits.hasData
      req.dirty := req.dirty || io.req.bits.dirty
      s_data_override := !io.req.bits.hasData // update data when io.req.bits.hasData
      req.delay_release := false.B
      remain_set := Mux(req.hasData || io.req.bits.hasData, ~0.U(refillCycles.W), 1.U(refillCycles.W))
    }

    when (io.release_wakeup.valid && io.release_wakeup.bits === req.miss_id) {
      state := s_release_req
      state_dup_0 := s_release_req
      state_dup_1 := s_release_req
      req.delay_release := false.B
      remain_set := Mux(
        req.hasData || update && io.release_update.bits.mask_orr || merge && io.req.bits.hasData,
        ~0.U(refillCycles.W),
        1.U(refillCycles.W)
      )
    }
  }

  // --------------------------------------------------------------------------------
  // while there are beats remaining to be sent, we keep sending
  // which beat to send in this cycle?
  val beat = PriorityEncoder(remain_dup_0)

  val beat_data = Wire(Vec(refillCycles, UInt(beatBits.W)))
  for (i <- 0 until refillCycles) {
    beat_data(i) := data((i + 1) * beatBits - 1, i * beatBits)
  }
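
  // beat_data just re-views the block-wide data register as refillCycles beats of
  // beatBits each. E.g., assuming 64-byte blocks and 32-byte beats (refillCycles = 2,
  // beatBits = 256): beat_data(0) = data(255, 0) and beat_data(1) = data(511, 256).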

  val probeResponse = edge.ProbeAck(
    fromSource = io.id,
    toAddress = req.addr_dup_0,
    lgSize = log2Ceil(cfg.blockBytes).U,
    reportPermissions = req.param
  )

  val probeResponseData = edge.ProbeAck(
    fromSource = io.id,
    toAddress = req.addr_dup_0,
    lgSize = log2Ceil(cfg.blockBytes).U,
    reportPermissions = req.param,
    data = beat_data(beat)
  )

  val voluntaryRelease = edge.Release(
    fromSource = io.id,
    toAddress = req.addr_dup_1,
    lgSize = log2Ceil(cfg.blockBytes).U,
    shrinkPermissions = req.param
  )._2

  val voluntaryReleaseData = edge.Release(
    fromSource = io.id,
    toAddress = req.addr_dup_1,
    lgSize = log2Ceil(cfg.blockBytes).U,
    shrinkPermissions = req.param,
    data = beat_data(beat)
  )._2

  voluntaryReleaseData.echo.lift(DirtyKey).foreach(_ := req.dirty)
  when(busy) {
    assert(!req.dirty || req.hasData)
  }

  io.mem_release.valid := busy
  io.mem_release.bits  := Mux(req.voluntary,
    Mux(req.hasData, voluntaryReleaseData, voluntaryRelease),
    Mux(req.hasData, probeResponseData, probeResponse))

  when (io.mem_release.fire()) { remain_clr := PriorityEncoderOH(remain_dup_1) }
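
  // Beats drain lowest-index first: PriorityEncoder(remain_dup_0) above selects the
  // beat driven onto channel C, and PriorityEncoderOH(remain_dup_1) clears exactly
  // that bit when the beat fires, so remain loses one bit per fired beat.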

  val (_, _, release_done, _) = edge.count(io.mem_release)

//  when (state === s_release_req && release_done) {
//    state := Mux(req.voluntary, s_release_resp, s_invalid)
//  }

  // Because the wbq now merges a same-addr req unconditionally, a req to be merged may
  // arrive too late to fold into the in-flight one. In that case we must handle the
  // new req after the previous one finishes.
  // TODO: initialize these
  val release_later = RegInit(false.B)
  val c_already_sent = RegInit(false.B)
  def tmp_req() = new Bundle {
    val param = UInt(cWidth.W)
    val voluntary = Bool()
    val hasData = Bool()
    val dirty = Bool()
    val delay_release = Bool()
    val miss_id = UInt(log2Up(cfg.nMissEntries).W)

    def toWritebackReq = {
      val r = Wire(new WritebackReq())
      r.data := data
      r.addr := req.addr
      r.addr_dup_0 := req.addr_dup_0
      r.addr_dup_1 := req.addr_dup_1
      r.param := param
      r.voluntary := voluntary
      r.hasData := hasData
      r.dirty := dirty
      r.delay_release := delay_release
      r.miss_id := miss_id
      r
    }
  }
  val req_later = Reg(tmp_req())
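
  // Note that req_later only buffers the control fields of a late request. Address and
  // data are deliberately omitted: a req is only ever merged when io.req.bits.addr
  // matches req.addr (see io.secondary_ready below), so toWritebackReq can safely
  // reuse the live req.addr* registers and the data register.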

  when (state_dup_0 === s_release_req) {
    when (io.mem_release.fire()) {
      c_already_sent := !release_done
    }

    when (req.voluntary) {
      // The previous req is Release
      when (release_done) {
        state := s_release_resp
        state_dup_0 := s_release_resp
        state_dup_1 := s_release_resp
      }
      // merge a ProbeAck
      when (merge) {
        when (io.mem_release.fire() || c_already_sent) {
          // too late to merge, handle the ProbeAck later
          release_later := true.B
          req_later.param := io.req.bits.param
          req_later.voluntary := io.req.bits.voluntary
          req_later.hasData := io.req.bits.hasData
          req_later.dirty := io.req.bits.dirty
          req_later.delay_release := io.req.bits.delay_release
          req_later.miss_id := io.req.bits.miss_id
        }.otherwise {
          // Release hasn't been sent out yet, change Release to ProbeAck
          req.voluntary := false.B
          req.hasData := req.hasData || io.req.bits.hasData
          req.dirty := req.dirty || io.req.bits.dirty
          s_data_override := false.B
          req.delay_release := false.B
          remain_set := Mux(req.hasData || io.req.bits.hasData, ~0.U(refillCycles.W), 1.U(refillCycles.W))
        }
      }
    }.otherwise {
      // The previous req is ProbeAck
      when (merge) {
        release_later := true.B
        req_later.param := io.req.bits.param
        req_later.voluntary := io.req.bits.voluntary
        req_later.hasData := io.req.bits.hasData
        req_later.dirty := io.req.bits.dirty
        req_later.delay_release := io.req.bits.delay_release
        req_later.miss_id := io.req.bits.miss_id
      }

      when (release_done) {
        when (merge) {
          // Send the Release after ProbeAck
//          state := s_release_req
//          req := Mux(merge, io.req.bits, req_later.toWritebackReq)
//          release_later := false.B
          state := s_sleep
          state_dup_0 := s_sleep
          state_dup_1 := s_sleep
          req := io.req.bits
          release_later := false.B
        }.elsewhen (release_later) {
          state := Mux(
            io.release_wakeup.valid && io.release_wakeup.bits === req_later.miss_id || !req_later.delay_release,
            s_release_req,
            s_sleep
          )
          state_dup_0 := Mux(
            io.release_wakeup.valid && io.release_wakeup.bits === req_later.miss_id || !req_later.delay_release,
            s_release_req,
            s_sleep
          )
          state_dup_1 := Mux(
            io.release_wakeup.valid && io.release_wakeup.bits === req_later.miss_id || !req_later.delay_release,
            s_release_req,
            s_sleep
          )
          req := req_later.toWritebackReq
          when (io.release_wakeup.valid && io.release_wakeup.bits === req_later.miss_id) {
            req.delay_release := false.B
          }
          release_later := false.B
        }.otherwise {
          state := s_invalid
          state_dup_0 := s_invalid
          state_dup_1 := s_invalid
          release_later := false.B
        }
      }

      when (io.release_wakeup.valid && io.release_wakeup.bits === req_later.miss_id) {
        req_later.delay_release := false.B
      }
    }
  }

  // --------------------------------------------------------------------------------
  // receive ReleaseAck for Releases
  when (state_dup_0 === s_release_resp) {
    io.mem_grant.ready := true.B

    when (merge) {
      release_later := true.B
      req_later.param := io.req.bits.param
      req_later.voluntary := io.req.bits.voluntary
      req_later.hasData := io.req.bits.hasData
      req_later.dirty := io.req.bits.dirty
      req_later.delay_release := io.req.bits.delay_release
      req_later.miss_id := io.req.bits.miss_id
    }
    when (io.mem_grant.fire()) {
      when (merge) {
        state := s_release_req
        state_dup_0 := s_release_req
        state_dup_1 := s_release_req
        req := io.req.bits
        remain_set := Mux(io.req.bits.hasData, ~0.U(refillCycles.W), 1.U(refillCycles.W))
        release_later := false.B
      }.elsewhen(release_later) {
        state := s_release_req
        state_dup_0 := s_release_req
        state_dup_1 := s_release_req
        req := req_later.toWritebackReq
        remain_set := Mux(req_later.hasData, ~0.U(refillCycles.W), 1.U(refillCycles.W))
        release_later := false.B
      }.otherwise {
        state := s_invalid
        state_dup_0 := s_invalid
        state_dup_1 := s_invalid
        release_later := false.B
      }
    }
  }

  // When does this entry accept a new req?
  // 1. When this entry is free
  // 2. When this entry wants to release while still waiting for the release_wakeup signal,
  //    and a probe req with the same addr comes. In this case we merge the probe with the
  //    release: handling this probe means we don't need another release.
  io.primary_ready := state_dup_1 === s_invalid
  io.secondary_ready := state_dup_1 =/= s_invalid && io.req.bits.addr === req.addr
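
  // Note: secondary_ready holds in every non-invalid state; what merging means depends
  // on the current state, as handled above. In s_sleep the entry turns its Release into
  // a ProbeAck; in s_release_req / s_release_resp the late req is either folded into
  // the in-flight one or parked in req_later and replayed afterwards.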

  // data update logic
  when (!s_data_merge) {
    data := mergeData(data, io.release_update.bits.data_delayed, io.release_update.bits.mask_delayed)
  }

  when (!s_data_override) {
    data := io.req_data.data
  }

  assert(!RegNext(!s_data_merge && !s_data_override))

  // performance counters
  XSPerfAccumulate("wb_req", io.req.fire())
  XSPerfAccumulate("wb_release", state === s_release_req && release_done && req.voluntary)
  XSPerfAccumulate("wb_probe_resp", state_dup_0 === s_release_req && release_done && !req.voluntary)
  XSPerfAccumulate("penalty_blocked_by_channel_C", io.mem_release.valid && !io.mem_release.ready)
  XSPerfAccumulate("penalty_waiting_for_channel_D", io.mem_grant.ready && !io.mem_grant.valid && state_dup_1 === s_release_resp)
}

class WritebackQueue(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheModule with HasTLDump with HasPerfEvents {
  val io = IO(new Bundle {
    val req = Flipped(DecoupledIO(new WritebackReq))
    val mem_release = DecoupledIO(new TLBundleC(edge.bundle))
    val mem_grant = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))

    val release_wakeup = Flipped(ValidIO(UInt(log2Up(cfg.nMissEntries).W)))
    val release_update = Flipped(ValidIO(new ReleaseUpdate))

    val miss_req = Flipped(Valid(UInt()))
    val block_miss_req = Output(Bool())
  })

  require(cfg.nReleaseEntries > cfg.nMissEntries)

  val primary_ready_vec = Wire(Vec(cfg.nReleaseEntries, Bool()))
  val secondary_ready_vec = Wire(Vec(cfg.nReleaseEntries, Bool()))
  val accept = Cat(primary_ready_vec).orR
  val merge = Cat(secondary_ready_vec).orR
  val alloc = accept && !merge
  // When there are empty entries, merge or allocate a new entry.
  // When there is no empty entry, reject the req even if it could be merged.
  io.req.ready := accept

  // assign default values to output signals
  io.mem_release.valid := false.B
  io.mem_release.bits  := DontCare
  io.mem_grant.ready   := false.B

  // delay the data write of a miss queue release update for 1 cycle
  val release_update_bits_for_entry = Wire(new MissQueueEntryReleaseUpdate)
  release_update_bits_for_entry.addr := io.release_update.bits.addr
  release_update_bits_for_entry.mask_delayed := RegEnable(io.release_update.bits.mask, io.release_update.valid)
  release_update_bits_for_entry.data_delayed := RegEnable(io.release_update.bits.data, io.release_update.valid)
  release_update_bits_for_entry.mask_orr := io.release_update.bits.mask.orR

  // delay the data write of an incoming writeback req for 1 cycle
  val req_data = RegEnable(io.req.bits.toWritebackReqData(), io.req.valid)
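
  // Presumably the same fanout-reduction trick as noted on MissQueueEntryReleaseUpdate:
  // the wide data field is registered once here and the registered copy fans out to all
  // entries one cycle behind the control fields. Entries account for the delay with the
  // s_data_override pulse, writing data the cycle after allocation.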

  require(isPow2(cfg.nMissEntries))
  val grant_source = io.mem_grant.bits.source
  val entries = Seq.fill(cfg.nReleaseEntries)(Module(new WritebackEntry(edge)))
  entries.zipWithIndex.foreach {
    case (entry, i) =>
      val former_primary_ready = if(i == 0)
        false.B
      else
        Cat((0 until i).map(j => entries(j).io.primary_ready)).orR
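
      // former_primary_ready tells whether any lower-indexed entry is also free, so
      // allocation goes to the lowest-indexed free entry only. Illustrative example:
      // if entries 2 and 5 are the only free ones, entry 2 sees primary_valid while
      // entry 5 does not, so exactly one entry accepts the req.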
      val entry_id = (i + releaseIdBase).U

      entry.io.id := entry_id

      // entry req
      entry.io.req.valid := io.req.valid
      primary_ready_vec(i)   := entry.io.primary_ready
      secondary_ready_vec(i) := entry.io.secondary_ready
      entry.io.req.bits  := io.req.bits
      entry.io.req_data  := req_data

      entry.io.primary_valid := alloc &&
        !former_primary_ready &&
        entry.io.primary_ready
      entry.io.secondary_valid := io.req.valid && accept

      entry.io.mem_grant.valid := (entry_id === grant_source) && io.mem_grant.valid
      entry.io.mem_grant.bits  := io.mem_grant.bits

      entry.io.release_wakeup := io.release_wakeup
      entry.io.release_update.valid := io.release_update.valid
      entry.io.release_update.bits := release_update_bits_for_entry // data write delayed
  }
  assert(RegNext(!(io.mem_grant.valid && !io.mem_grant.ready)))
  io.mem_grant.ready := true.B

  val miss_req_conflict = VecInit(entries.map(e => e.io.block_addr.valid && e.io.block_addr.bits === io.miss_req.bits)).asUInt.orR
  io.block_miss_req := io.miss_req.valid && miss_req_conflict

  TLArbiter.robin(edge, io.mem_release, entries.map(_.io.mem_release):_*)
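
  // Round-robin arbitration between the entries' C-channel streams. TLArbiter holds
  // its grant across all beats of a message, so the beats of a multi-beat ReleaseData
  // from one entry are never interleaved with another entry's message.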

  // sanity check
  // print all input/output requests for debug purposes
  // print req
  when (io.req.fire()) {
    io.req.bits.dump()
  }

  when (io.mem_release.fire()) {
    io.mem_release.bits.dump
  }

  when (io.mem_grant.fire()) {
    io.mem_grant.bits.dump
  }

  when (io.miss_req.valid) {
    XSDebug("miss_req: addr: %x\n", io.miss_req.bits)
  }

  when (io.block_miss_req) {
    XSDebug("block_miss_req\n")
  }

  // performance counters
  XSPerfAccumulate("wb_req", io.req.fire())

  val perfValidCount = RegNext(PopCount(entries.map(e => e.io.block_addr.valid)))
  val perfEvents = Seq(
    ("dcache_wbq_req      ", io.req.fire()),
    ("dcache_wbq_1_4_valid", (perfValidCount < (cfg.nReleaseEntries.U/4.U))),
    ("dcache_wbq_2_4_valid", (perfValidCount > (cfg.nReleaseEntries.U/4.U)) & (perfValidCount <= (cfg.nReleaseEntries.U/2.U))),
    ("dcache_wbq_3_4_valid", (perfValidCount > (cfg.nReleaseEntries.U/2.U)) & (perfValidCount <= (cfg.nReleaseEntries.U*3.U/4.U))),
    ("dcache_wbq_4_4_valid", (perfValidCount > (cfg.nReleaseEntries.U*3.U/4.U))),
  )
  generatePerfEvent()
}