xref: /XiangShan/src/main/scala/xiangshan/backend/fu/PMP.scala (revision b6982e83d6fe4f8c3d111ebc70665f115e470ddf)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.backend.fu

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.internal.naming.chiselName
import chisel3.util._
import utils.MaskedRegMap.WritableMask
import xiangshan._
import xiangshan.backend.fu.util.HasCSRConst
import utils._
import xiangshan.cache.mmu.{TlbCmd, TlbExceptionBundle}

trait PMPConst {
  val PMPOffBits = 2 // the minimum grain is 4 bytes
}

abstract class PMPBundle(implicit p: Parameters) extends XSBundle with PMPConst {
  val CoarserGrain: Boolean = PlatformGrain > PMPOffBits
}

abstract class PMPModule(implicit p: Parameters) extends XSModule with PMPConst with HasCSRConst

@chiselName
class PMPConfig(implicit p: Parameters) extends PMPBundle {
  val l = Bool()
  val res = UInt(2.W)
  val a = UInt(2.W)
  val x = Bool()
  val w = Bool()
  val r = Bool()

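  // A-field encoding (RISC-V privileged spec): 0 = OFF, 1 = TOR, 2 = NA4, 3 = NAPOT.
  // With a grain coarser than 4 bytes, NA4 is not selectable, so a(1) alone is enough
  // to distinguish the NA4/NAPOT family from OFF/TOR.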
  def off = a === 0.U
  def tor = a === 1.U
  def na4 = { if (CoarserGrain) false.B else a === 2.U }
  def napot = { if (CoarserGrain) a(1).asBool else a === 3.U }
  def off_tor = !a(1)
  def na4_napot = a(1)

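  // Lock semantics per the privileged spec: a locked entry's addr register is not
  // writable; additionally, pmpaddr(i) is locked when entry i+1 is locked and uses
  // TOR, since addr(i) then serves as entry i+1's lower bound.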
  def locked = l
  def addr_locked: Bool = locked
  def addr_locked(next: PMPConfig): Bool = locked || (next.locked && next.tor)

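  /** WARL write of one full pmpcfg CSR.
    * The combination R=0 && W=1 is reserved, so W is cleared unless R is also set.
    * With a grain coarser than 4 bytes, NA4 is not selectable, so a written NA4
    * (a = 2) is promoted to NAPOT (a = 3) by Cat(a(1), a.orR).
    */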
  def write_cfg_vec(cfgs: UInt): UInt = {
    val cfgVec = Wire(Vec(cfgs.getWidth/8, new PMPConfig))
    for (i <- cfgVec.indices) {
      val tmp = cfgs((i+1)*8-1, i*8).asUInt.asTypeOf(new PMPConfig)
      cfgVec(i) := tmp
      cfgVec(i).w := tmp.w && tmp.r
      if (CoarserGrain) { cfgVec(i).a := Cat(tmp.a(1), tmp.a.orR) }
    }
    cfgVec.asUInt
  }

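  /** Same WARL write as above, but also refreshes the pre-computed NAPOT match mask
    * of every entry whose new mode is NA4/NAPOT, since the mask depends on the cfg
    * just written as well as on the entry's addr.
    */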
  def write_cfg_vec(mask: Vec[UInt], addr: Vec[UInt], index: Int)(cfgs: UInt): UInt = {
    val cfgVec = Wire(Vec(cfgs.getWidth/8, new PMPConfig))
    for (i <- cfgVec.indices) {
      val tmp = cfgs((i+1)*8-1, i*8).asUInt.asTypeOf(new PMPConfig)
      cfgVec(i) := tmp
      cfgVec(i).w := tmp.w && tmp.r
      if (CoarserGrain) { cfgVec(i).a := Cat(tmp.a(1), tmp.a.orR) }
      when (cfgVec(i).na4_napot) {
        mask(index + i) := new PMPEntry().match_mask(cfgVec(i), addr(index + i))
      }
    }
    cfgVec.asUInt
  }

  def reset() = {
    l := false.B
    a := 0.U
  }
}

/** PMPBase for the CSR unit,
  * with only the read and write logic
  */
@chiselName
class PMPBase(implicit p: Parameters) extends PMPBundle {
  val cfg = new PMPConfig
  val addr = UInt((PAddrBits - PMPOffBits).W)

  /** In general, the PMP grain is 2**(G+2) bytes. When G >= 1, NA4 is not selectable.
    * When G >= 2 and cfg.a(1) is set (the mode is NAPOT), bits addr(G-2, 0) read as all ones.
    * When G >= 1 and cfg.a(1) is clear (the mode is OFF or TOR), bits addr(G-1, 0) read as all zeros.
    * The low PMPOffBits are not stored at all.
    */
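  // Worked example (a sketch, assuming PlatformGrain = 12, i.e. a 4 KiB grain, so G = 10):
  //   NAPOT:   addr(8, 0) reads back as all ones  (set_low_bits(addr, G-1))
  //   OFF/TOR: addr(9, 0) reads back as all zeros (clear_low_bits(addr, G))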
  def read_addr(): UInt = {
    read_addr(cfg)(addr)
  }

  def read_addr(cfg: PMPConfig)(addr: UInt): UInt = {
    val G = PlatformGrain - PMPOffBits
    require(G >= 0)
    if (G == 0) {
      addr
    } else if (G >= 2) {
      Mux(cfg.na4_napot, set_low_bits(addr, G-1), clear_low_bits(addr, G))
    } else { // G is 1
      Mux(cfg.off_tor, clear_low_bits(addr, G), addr)
    }
  }
  /** Naming convention: addr is the stored (internal) address with the low PMPOffBits
    * dropped; compare_addr is the internal address expanded for comparison;
    * paddr is the external (physical) address.
    */
  def write_addr(next: PMPBase)(paddr: UInt) = {
    Mux(!cfg.addr_locked(next.cfg), paddr, addr)
  }
  def write_addr(paddr: UInt) = {
    Mux(!cfg.addr_locked, paddr, addr)
  }

  def set_low_bits(data: UInt, num: Int): UInt = {
    require(num >= 0)
    data | ((1 << num)-1).U
  }

  /** clear the data's low num bits (lsb) */
  def clear_low_bits(data: UInt, num: Int): UInt = {
    require(num >= 0)
    // use Cat instead of & with a mask to avoid the "Signal Width" problem
    if (num == 0) { data }
    else { Cat(data(data.getWidth-1, num), 0.U(num.W)) }
  }

  def gen(cfg: PMPConfig, addr: UInt) = {
    require(addr.getWidth == this.addr.getWidth)
    this.cfg := cfg
    this.addr := addr
  }
}

/** PMPEntry for the PMP copies outside the CSR unit,
  * with one more element, mask, to help NAPOT matching.
  * TODO: make mask an element, not a method, for timing optimization
  */
@chiselName
class PMPEntry(implicit p: Parameters) extends PMPBase {
  val mask = UInt(PAddrBits.W) // helps to match in NAPOT mode

  /** compare_addr is used to compare with the input address: the stored word address
    * shifted back to a byte address, with the sub-grain bits cleared. */
  def compare_addr = ((addr << PMPOffBits) & ~(((1 << PlatformGrain) - 1).U(PAddrBits.W))).asUInt

  def write_addr(next: PMPBase, mask: UInt)(paddr: UInt) = {
    mask := Mux(!cfg.addr_locked(next.cfg), match_mask(paddr), mask)
    Mux(!cfg.addr_locked(next.cfg), paddr, addr)
  }

  def write_addr(mask: UInt)(paddr: UInt) = {
    mask := Mux(!cfg.addr_locked, match_mask(paddr), mask)
    Mux(!cfg.addr_locked, paddr, addr)
  }
  /** lgSize and lgMaxSize are both log2 of the access size.
    * For the dtlb, maxSize is the byte width of XLEN, which is 8.
    * For the itlb and ptw, is maxSize log2(512)?
    * But we may only need 64 bytes? How do we prevent the bugs?
    * TODO: handle the special case that itlb & ptw & dcache access a wider size than XLEN
    */
  def is_match(paddr: UInt, lgSize: UInt, lgMaxSize: Int, last_pmp: PMPEntry): Bool = {
    Mux(cfg.na4_napot, napotMatch(paddr, lgSize, lgMaxSize),
      Mux(cfg.tor, torMatch(paddr, lgSize, lgMaxSize, last_pmp), false.B))
  }
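  // Note: an OFF entry (a = 0) selects neither Mux branch above, so it matches nothing.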

  /** generate the match mask to help matching in NAPOT mode */
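  // Worked example (a sketch): for an 8-byte NAPOT entry, pmpaddr ends in 0 and
  // cfg.a(0) = 1, so tmp_addr ends in 0b01; tmp_addr & ~(tmp_addr + 1) keeps exactly
  // the run of trailing ones (0b01), and appending the PMPOffBits ones gives a mask
  // whose low 3 bits are set, i.e. unmaskEqual ignores the low 3 address bits.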
  def match_mask(paddr: UInt) = {
    val tmp_addr = Cat(paddr, cfg.a(0)) | (((1 << PlatformGrain) - 1) >> PMPOffBits).U((paddr.getWidth + 1).W)
    Cat(tmp_addr & ~(tmp_addr + 1.U), ((1 << PMPOffBits) - 1).U(PMPOffBits.W))
  }

  def match_mask(cfg: PMPConfig, paddr: UInt) = {
    val tmp_addr = Cat(paddr, cfg.a(0)) | (((1 << PlatformGrain) - 1) >> PMPOffBits).U((paddr.getWidth + 1).W)
    Cat(tmp_addr & ~(tmp_addr + 1.U), ((1 << PMPOffBits) - 1).U(PMPOffBits.W))
  }

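  /** Is the access entirely below this entry's compare_addr?
    * When lgMaxSize exceeds the grain, the comparison is split into a high part and a
    * low part; the low part ORs in the one-hot-minus-one size mask so that the last
    * byte of the access is what gets compared.
    */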
  def boundMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int) = {
    if (lgMaxSize <= PlatformGrain) {
      paddr < compare_addr
    } else {
      val highLess = (paddr >> lgMaxSize) < (compare_addr >> lgMaxSize)
      val highEqual = (paddr >> lgMaxSize) === (compare_addr >> lgMaxSize)
      val lowLess = (paddr(lgMaxSize-1, 0) | OneHot.UIntToOH1(lgSize, lgMaxSize)) < compare_addr(lgMaxSize-1, 0)
      highLess || (highEqual && lowLess)
    }
  }

  def lowerBoundMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int) = {
    !boundMatch(paddr, lgSize, lgMaxSize)
  }

  def higherBoundMatch(paddr: UInt, lgMaxSize: Int) = {
    boundMatch(paddr, 0.U, lgMaxSize)
  }

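  /** TOR: the access starts at or above the previous entry's bound and below this
    * entry's bound. An access that straddles the upper bound is caught by aligned(),
    * not here.
    */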
  def torMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int, last_pmp: PMPEntry) = {
    last_pmp.lowerBoundMatch(paddr, lgSize, lgMaxSize) && higherBoundMatch(paddr, lgMaxSize)
  }

  def unmaskEqual(a: UInt, b: UInt, m: UInt) = {
    (a & ~m) === (b & ~m)
  }

  def napotMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int) = {
    if (lgMaxSize <= PlatformGrain) {
      unmaskEqual(paddr, compare_addr, mask)
    } else {
      val lowMask = mask | OneHot.UIntToOH1(lgSize, lgMaxSize)
      val highMatch = unmaskEqual(paddr >> lgMaxSize, compare_addr >> lgMaxSize, mask >> lgMaxSize)
      val lowMatch = unmaskEqual(paddr(lgMaxSize-1, 0), compare_addr(lgMaxSize-1, 0), lowMask(lgMaxSize-1, 0))
      highMatch && lowMatch
    }
  }

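  /** Does the access stay inside one region, i.e. not straddle a region boundary?
    * TOR: flag the access when it crosses either the lower or the upper bound.
    * NA4/NAPOT: the access is aligned as long as its size mask is covered by the
    * region's match mask.
    */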
  def aligned(paddr: UInt, lgSize: UInt, lgMaxSize: Int, last: PMPEntry) = {
    if (lgMaxSize <= PlatformGrain) {
      true.B
    } else {
      val lowBitsMask = OneHot.UIntToOH1(lgSize, lgMaxSize)
      val lowerBound = ((paddr >> lgMaxSize) === (last.compare_addr >> lgMaxSize)) &&
        ((~paddr(lgMaxSize-1, 0) & last.compare_addr(lgMaxSize-1, 0)) =/= 0.U)
      val upperBound = ((paddr >> lgMaxSize) === (compare_addr >> lgMaxSize)) &&
        ((compare_addr(lgMaxSize-1, 0) & (paddr(lgMaxSize-1, 0) | lowBitsMask)) =/= 0.U)
      val torAligned = !(lowerBound || upperBound)
      val napotAligned = (lowBitsMask & ~mask(lgMaxSize-1, 0)) === 0.U
      Mux(cfg.na4_napot, napotAligned, torAligned)
    }
  }

  def gen(cfg: PMPConfig, addr: UInt, mask: UInt) = {
    require(addr.getWidth == this.addr.getWidth)
    this.cfg := cfg
    this.addr := addr
    this.mask := mask
  }

  def reset() = {
    cfg.l := false.B
    cfg.a := 0.U
  }
}

@chiselName
class PMP(implicit p: Parameters) extends PMPModule {
  val io = IO(new Bundle {
    val distribute_csr = Flipped(new DistributedCSRIO())
    val pmp = Output(Vec(NumPMP, new PMPEntry()))
  })

  val w = io.distribute_csr.w

  val pmp = Wire(Vec(NumPMP, new PMPEntry()))

  val pmpCfgPerCSR = XLEN / new PMPConfig().getWidth
  def pmpCfgIndex(i: Int) = (XLEN / 32) * (i / pmpCfgPerCSR)

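  // With XLEN = 64, each pmpcfg CSR packs 8 config bytes and only the even-numbered
  // pmpcfg CSRs exist, so pmpCfgIndex steps by XLEN/32 = 2 (entries 0-7 live in
  // pmpcfg0, entries 8-15 in pmpcfg2, and so on).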
  /** to fit MaskedRegMap's write interface, declare the cfgs as merged CSRs
    * and split them back into the individual pmp entries */
  val cfgMerged = RegInit(VecInit(Seq.fill(NumPMP / pmpCfgPerCSR)(0.U(XLEN.W))))
  val cfgs = WireInit(cfgMerged).asTypeOf(Vec(NumPMP, new PMPConfig()))
  val addr = Reg(Vec(NumPMP, UInt((PAddrBits-PMPOffBits).W)))
  val mask = Reg(Vec(NumPMP, UInt(PAddrBits.W)))

  for (i <- pmp.indices) {
    pmp(i).gen(cfgs(i), addr(i), mask(i))
  }

  val cfg_mapping = (0 until NumPMP by pmpCfgPerCSR).map(i => {Map(
    MaskedRegMap(
      addr = PmpcfgBase + pmpCfgIndex(i),
      reg = cfgMerged(i/pmpCfgPerCSR),
      wmask = WritableMask,
      wfn = new PMPConfig().write_cfg_vec(mask, addr, i)
    ))
  }).fold(Map())((a, b) => a ++ b) // ugly code, ping me if you have a better way

  val addr_mapping = (0 until NumPMP).map(i => {Map(
    MaskedRegMap(
      addr = PmpaddrBase + i,
      reg = addr(i),
      wmask = WritableMask,
      wfn = { if (i != NumPMP-1) pmp(i).write_addr(pmp(i+1), mask(i)) else pmp(i).write_addr(mask(i)) },
      rmask = WritableMask,
      rfn = new PMPBase().read_addr(pmp(i).cfg)
    ))
  }).fold(Map())((a, b) => a ++ b) // ugly code, ping me if you have a better way
  val pmpMapping = cfg_mapping ++ addr_mapping

  val rdata = Wire(UInt(XLEN.W))
  MaskedRegMap.generate(pmpMapping, w.bits.addr, rdata, w.valid, w.bits.data)

  io.pmp := pmp
}

class PMPReqBundle(lgMaxSize: Int = 3)(implicit p: Parameters) extends PMPBundle {
  val addr = Output(UInt(PAddrBits.W))
  val size = Output(UInt(log2Ceil(lgMaxSize+1).W))
  val cmd = Output(TlbCmd())

  override def cloneType = (new PMPReqBundle(lgMaxSize)).asInstanceOf[this.type]
}

class PMPRespBundle(implicit p: Parameters) extends TlbExceptionBundle

@chiselName
class PMPChecker(lgMaxSize: Int = 3, sameCycle: Boolean = false)(implicit p: Parameters) extends PMPModule {
  val io = IO(new Bundle{
    val env = Input(new Bundle {
      val mode = Input(UInt(2.W))
      val pmp = Input(Vec(NumPMP, new PMPEntry()))
    })
    val req = Flipped(Valid(new PMPReqBundle(lgMaxSize))) // usage: connect valid to the fire signal
    val resp = Output(new PMPRespBundle())
  })

  val req = io.req.bits

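  // Default result when no entry matches ("entry minus one"): in M-mode
  // (mode > ModeS) the access passes; in lower modes it is denied, as the spec
  // requires when any PMP entry is implemented.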
  val passThrough = if (io.env.pmp.isEmpty) true.B else (io.env.mode > ModeS)
  val pmpMinusOne = WireInit(0.U.asTypeOf(new PMPEntry()))
  pmpMinusOne.cfg.r := passThrough
  pmpMinusOne.cfg.w := passThrough
  pmpMinusOne.cfg.x := passThrough

  val match_wave = Wire(Vec(NumPMP, Bool()))
  val ignore_wave = Wire(Vec(NumPMP, Bool()))
  val aligned_wave = Wire(Vec(NumPMP, Bool()))
  val prev_wave = Wire(Vec(NumPMP, new PMPEntry()))
  val cur_wave = Wire(Vec(NumPMP, new PMPEntry()))

  dontTouch(match_wave)
  dontTouch(ignore_wave)
  dontTouch(aligned_wave)
  dontTouch(prev_wave)
  dontTouch(cur_wave)

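  // Fold from the highest-numbered entry down so that, when several entries match,
  // the lowest-numbered one wins, matching the spec's static priority. A locked
  // entry applies even in M-mode; an unlocked one is ignored there (passThrough).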
  val res = io.env.pmp.zip(pmpMinusOne +: io.env.pmp.take(NumPMP-1)).zipWithIndex
    .reverse.foldLeft(pmpMinusOne) { case (prev, ((pmp, last_pmp), i)) =>
    val is_match = pmp.is_match(req.addr, req.size, lgMaxSize, last_pmp)
    val ignore = passThrough && !pmp.cfg.l
    val aligned = pmp.aligned(req.addr, req.size, lgMaxSize, last_pmp)

    val cur = WireInit(pmp)
    cur.cfg.r := aligned && (pmp.cfg.r || ignore)
    cur.cfg.w := aligned && (pmp.cfg.w || ignore)
    cur.cfg.x := aligned && (pmp.cfg.x || ignore)

    match_wave(i) := is_match
    ignore_wave(i) := ignore
    aligned_wave(i) := aligned
    cur_wave(i) := cur
    prev_wave(i) := prev

    XSDebug(p"pmp${i.U} cfg:${Hexadecimal(pmp.cfg.asUInt)} addr:${Hexadecimal(pmp.addr)} mask:${Hexadecimal(pmp.mask)} is_match:${is_match} aligned:${aligned}")

    Mux(is_match, cur, prev)
  }

  // NOTE: if the itlb or dtlb can be blocked, the registered response below may need
  // the same treatment
  val ld = TlbCmd.isRead(req.cmd) && !TlbCmd.isAtom(req.cmd) && !res.cfg.r
  val st = (TlbCmd.isWrite(req.cmd) || TlbCmd.isAtom(req.cmd)) && !res.cfg.w
  val instr = TlbCmd.isExec(req.cmd) && !res.cfg.x
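  // Atomic accesses are checked as stores: an AMO without write permission raises a
  // store fault rather than a load fault, which is why isAtom is excluded from ld
  // and included in st.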
  if (sameCycle) {
    io.resp.ld := ld
    io.resp.st := st
    io.resp.instr := instr
  } else {
    io.resp.ld := RegEnable(ld, io.req.valid)
    io.resp.st := RegEnable(st, io.req.valid)
    io.resp.instr := RegEnable(instr, io.req.valid)
  }
}
384*b6982e83SLemover}
385