// xref: /XiangShan/src/main/scala/xiangshan/backend/fu/PMP.scala (revision 3c02ee8f82edea481fa8336c7f54ffc17fafba91)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

// See LICENSE.SiFive for license details.
18
19package xiangshan.backend.fu
20
21import chipsalliance.rocketchip.config.Parameters
22import chisel3._
23import chisel3.internal.naming.chiselName
24import chisel3.util._
25import utility.MaskedRegMap.WritableMask
26import xiangshan._
27import xiangshan.backend.fu.util.HasCSRConst
28import utils._
29import utility._
30import xiangshan.cache.mmu.{TlbCmd, TlbExceptionBundle}
31
/** Constants shared by every PMP bundle/module.
  * PlatformGrain (from HasPMParameters) is the log2 of the PMP grain in bytes;
  * when it is larger than the minimal 4-byte granularity, NA4 regions are not
  * representable (CoarserGrain).
  */
trait PMPConst extends HasPMParameters {
  val PMPOffBits = 2 // minimal 4bytes
  val CoarserGrain: Boolean = PlatformGrain > PMPOffBits
}
36
// Common base types that mix the PMP constants into Bundle/Module/XSModule.
abstract class PMPBundle(implicit val p: Parameters) extends Bundle with PMPConst
abstract class PMPModule(implicit val p: Parameters) extends Module with PMPConst
abstract class PMPXSModule(implicit p: Parameters) extends XSModule with PMPConst
40
/** One pmpcfg entry (a single byte of a pmpcfg CSR), fields listed msb-first. */
@chiselName
class PMPConfig(implicit p: Parameters) extends PMPBundle {
  val l = Bool() // lock bit: entry becomes read-only until reset
  val c = Bool() // res(1), unuse in pmp
  val atomic = Bool() // res(0), unuse in pmp
  val a = UInt(2.W) // address-matching mode: 0 off, 1 tor, 2 na4, 3 napot
  val x = Bool()
  val w = Bool()
  val r = Bool()

  def res: UInt = Cat(c, atomic) // in pmp, unused
  def off = a === 0.U
  def tor = a === 1.U
  // with a grain coarser than 4 bytes, na4 is never selectable
  def na4 = { if (CoarserGrain) false.B else a === 2.U }
  def napot = { if (CoarserGrain) a(1).asBool else a === 3.U }
  def off_tor = !a(1)
  def na4_napot = a(1)

  def locked = l
  def addr_locked: Bool = locked
  // pmpaddr(i) is also locked when entry i+1 is locked in tor mode,
  // because it then serves as entry i+1's lower bound
  def addr_locked(next: PMPConfig): Bool = locked || (next.locked && next.tor)
}
63
/** Read/write conversion helpers shared by the CSR-side registers (PMPBase)
  * and the distributed entry copies (PMPEntry). Stateless: each method only
  * transforms the wires passed in (plus the explicit mask side effects).
  */
trait PMPReadWriteMethodBare extends PMPConst {
  /** Generate the napot match mask for paddr.
    * Cat(paddr, cfg.a(0)) appends the na4/napot-distinguishing bit so the
    * trailing-ones scan below covers it as well; or-ing in
    * ((1 << PlatformGrain)-1) >> PMPOffBits enforces at least grain-sized
    * regions. `x & ~(x + 1)` isolates the trailing run of ones, and the low
    * PMPOffBits are padded with ones because addr registers drop those bits.
    */
  def match_mask(cfg: PMPConfig, paddr: UInt) = {
    val match_mask_c_addr = Cat(paddr, cfg.a(0)) | (((1 << PlatformGrain) - 1) >> PMPOffBits).U((paddr.getWidth + 1).W)
    Cat(match_mask_c_addr & ~(match_mask_c_addr + 1.U), ((1 << PMPOffBits) - 1).U(PMPOffBits.W))
  }

  /** Write function for a merged pmpcfg CSR whose entries also keep napot
    * masks: splits the written word into per-entry bytes, applies the rules
    * below, and refreshes mask(index + i) for entries left in na4/napot mode.
    * NOTE(review): only the *written* l bit gates the adjustments here; the
    * lock bit already stored is not consulted — confirm it is enforced by the
    * surrounding register map.
    */
  def write_cfg_vec(mask: Vec[UInt], addr: Vec[UInt], index: Int)(cfgs: UInt): UInt = {
    val cfgVec = Wire(Vec(cfgs.getWidth/8, new PMPConfig))
    for (i <- cfgVec.indices) {
      val cfg_w_m_tmp = cfgs((i+1)*8-1, i*8).asUInt.asTypeOf(new PMPConfig)
      cfgVec(i) := cfg_w_m_tmp
      when (!cfg_w_m_tmp.l) {
        // R=0 && W=1 is a reserved combination: force W to imply R
        cfgVec(i).w := cfg_w_m_tmp.w && cfg_w_m_tmp.r
        // coarse grain: na4 (2) is not selectable, promote it to napot (3)
        if (CoarserGrain) { cfgVec(i).a := Cat(cfg_w_m_tmp.a(1), cfg_w_m_tmp.a.orR) }
        when (cfgVec(i).na4_napot) {
          mask(index + i) := match_mask(cfgVec(i), addr(index + i))
        }
      }
    }
    cfgVec.asUInt
  }

  /** Readback of a pmpaddr register under grain G = PlatformGrain - PMPOffBits:
    * in napot mode the low G-1 bits read as ones, in off/tor mode the low G
    * bits read as zeros; with G == 0 the register reads back unchanged.
    */
  def read_addr(cfg: PMPConfig)(addr: UInt): UInt = {
    val G = PlatformGrain - PMPOffBits
    require(G >= 0)
    if (G == 0) {
      addr
    } else if (G >= 2) {
      Mux(cfg.na4_napot, set_low_bits(addr, G-1), clear_low_bits(addr, G))
    } else { // G is 1
      Mux(cfg.off_tor, clear_low_bits(addr, G), addr)
    }
  }

  /** Write a pmpaddr register: ignored when this entry is locked (directly or
    * as `next`'s locked-tor lower bound); otherwise the napot mask is also
    * refreshed as a side effect.
    */
  def write_addr(next: PMPConfig, mask: UInt)(paddr: UInt, cfg: PMPConfig, addr: UInt): UInt = {
    val locked = cfg.addr_locked(next)
    mask := Mux(!locked, match_mask(cfg, paddr), mask)
    Mux(!locked, paddr, addr)
  }

  /** set the data's low num bits (lsb) to ones */
  def set_low_bits(data: UInt, num: Int): UInt = {
    require(num >= 0)
    data | ((1 << num)-1).U
  }

  /** mask the data's low num bits (lsb) */
  def clear_low_bits(data: UInt, num: Int): UInt = {
    require(num >= 0)
    // use Cat instead of & with mask to avoid "Signal Width" problem
    if (num == 0) { data }
    else { Cat(data(data.getWidth-1, num), 0.U(num.W)) }
  }
}
117
/** Read/write methods for the CSR-side PMPBase registers (no napot mask kept
  * on this side).
  */
trait PMPReadWriteMethod extends PMPReadWriteMethodBare  { this: PMPBase =>
  /** Write function for a merged pmpcfg CSR: same per-byte rules as the bare
    * version, without the mask maintenance.
    * NOTE(review): as in the bare version, only the written l bit is checked
    * here — confirm stored-lock enforcement happens elsewhere.
    */
  def write_cfg_vec(cfgs: UInt): UInt = {
    val cfgVec = Wire(Vec(cfgs.getWidth/8, new PMPConfig))
    for (i <- cfgVec.indices) {
      val cfg_w_tmp = cfgs((i+1)*8-1, i*8).asUInt.asTypeOf(new PMPConfig)
      cfgVec(i) := cfg_w_tmp
      when (!cfg_w_tmp.l) {
        // R=0 && W=1 is reserved: force W to imply R
        cfgVec(i).w := cfg_w_tmp.w && cfg_w_tmp.r
        // coarse grain: promote na4 to napot
        if (CoarserGrain) { cfgVec(i).a := Cat(cfg_w_tmp.a(1), cfg_w_tmp.a.orR) }
      }
    }
    cfgVec.asUInt
  }

  /** In general, the PMP grain is 2**{G+2} bytes. when G >= 1, na4 is not selectable.
   * When G >= 2 and cfg.a(1) is set(then the mode is napot), the bits addr(G-2, 0) read as ones.
   * When G >= 1 and cfg.a(1) is clear(the mode is off or tor), the addr(G-1, 0) read as zeros.
   * The low OffBits is dropped
   */
  def read_addr(): UInt = {
    read_addr(cfg)(addr)
  }

  /** addr for inside addr, drop OffBits with.
   * compare_addr for inside addr for comparing.
   * paddr for outside addr.
   */
  def write_addr(next: PMPConfig)(paddr: UInt): UInt = {
    // write is dropped when locked directly or via next's locked-tor lower bound
    Mux(!cfg.addr_locked(next), paddr, addr)
  }
  /** Last-entry variant: no following entry can alias this entry's lock. */
  def write_addr(paddr: UInt): UInt = {
    Mux(!cfg.addr_locked, paddr, addr)
  }
}
152
153/** PMPBase for CSR unit
154  * with only read and write logic
155  */
@chiselName
class PMPBase(implicit p: Parameters) extends PMPBundle with PMPReadWriteMethod {
  val cfg = new PMPConfig
  val addr = UInt((PMPAddrBits - PMPOffBits).W) // stored without the low offset bits

  /** Connect this entry's fields from existing wires/registers. */
  def gen(cfg: PMPConfig, addr: UInt) = {
    require(addr.getWidth == this.addr.getWidth)
    this.cfg := cfg
    this.addr := addr
  }
}
167
/** Address matching and alignment checking for one PMPEntry. */
trait PMPMatchMethod extends PMPConst { this: PMPEntry =>
  /** compare_addr is used to compare with input addr:
    * rebuilt from addr by restoring the dropped offset bits and clearing
    * anything below the platform grain.
    */
  def compare_addr: UInt = ((addr << PMPOffBits) & ~(((1 << PlatformGrain) - 1).U(PMPAddrBits.W))).asUInt

  /** size and maxSize are all log2 Size
   * for dtlb, the maxSize is bPMXLEN which is 8
   * for itlb and ptw, the maxSize is log2(512) ?
   * but we may only need the 64 bytes? how to prevent the bugs?
   * TODO: handle the special case that itlb & ptw & dcache access wider size than PMXLEN
   * Mode dispatch: na4/napot and tor are checked; off never matches.
   */
  def is_match(paddr: UInt, lgSize: UInt, lgMaxSize: Int, last_pmp: PMPEntry): Bool = {
    Mux(cfg.na4_napot, napotMatch(paddr, lgSize, lgMaxSize),
      Mux(cfg.tor, torMatch(paddr, lgSize, lgMaxSize, last_pmp), false.B))
  }

  /** generate match mask to help match in napot mode */
  def match_mask(paddr: UInt): UInt = {
    match_mask(cfg, paddr)
  }

  /** True when the whole access [paddr, paddr + 2^lgSize) lies below
    * compare_addr. When accesses may be wider than the grain, the comparison
    * splits at lgMaxSize and the low part extends paddr to the access's last
    * byte via the size-derived one-hot-minus-one mask, so an access that
    * straddles compare_addr does not count as below it.
    */
  def boundMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int): Bool = {
    if (lgMaxSize <= PlatformGrain) {
      (paddr < compare_addr)
    } else {
      val highLess = (paddr >> lgMaxSize) < (compare_addr >> lgMaxSize)
      val highEqual = (paddr >> lgMaxSize) === (compare_addr >> lgMaxSize)
      val lowLess = (paddr(lgMaxSize-1, 0) | OneHot.UIntToOH1(lgSize, lgMaxSize))  < compare_addr(lgMaxSize-1, 0)
      highLess || (highEqual && lowLess)
    }
  }

  /** Access sits at or above this entry's address (used through the previous
    * entry as the tor lower bound).
    */
  def lowerBoundMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int): Bool = {
    !boundMatch(paddr, lgSize, lgMaxSize)
  }

  /** Start address is below this entry's address (tor upper bound; size 0
    * because only the first byte must be inside — boundary straddling is
    * caught separately by `aligned`).
    */
  def higherBoundMatch(paddr: UInt, lgMaxSize: Int) = {
    boundMatch(paddr, 0.U, lgMaxSize)
  }

  /** tor region: previous entry's address <= paddr < this entry's address. */
  def torMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int, last_pmp: PMPEntry): Bool = {
    last_pmp.lowerBoundMatch(paddr, lgSize, lgMaxSize) && higherBoundMatch(paddr, lgMaxSize)
  }

  /** Equality ignoring the bit positions set in m. */
  def unmaskEqual(a: UInt, b: UInt, m: UInt) = {
    (a & ~m) === (b & ~m)
  }

  /** napot region match: compare under the entry's mask; for accesses wider
    * than the grain the size-derived mask is folded into the low comparison
    * so the whole access extent is considered.
    */
  def napotMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int) = {
    if (lgMaxSize <= PlatformGrain) {
      unmaskEqual(paddr, compare_addr, mask)
    } else {
      val lowMask = mask | OneHot.UIntToOH1(lgSize, lgMaxSize)
      val highMatch = unmaskEqual(paddr >> lgMaxSize, compare_addr >> lgMaxSize, mask >> lgMaxSize)
      val lowMatch = unmaskEqual(paddr(lgMaxSize-1, 0), compare_addr(lgMaxSize-1, 0), lowMask(lgMaxSize-1, 0))
      highMatch && lowMatch
    }
  }

  /** True when the access does not straddle the region boundary:
    * tor — neither the lower bound (last.compare_addr) nor the upper bound
    * (compare_addr) cuts through the access extent;
    * napot — the size-derived mask is fully covered by the entry mask.
    * Trivially true when accesses cannot exceed the grain.
    */
  def aligned(paddr: UInt, lgSize: UInt, lgMaxSize: Int, last: PMPEntry) = {
    if (lgMaxSize <= PlatformGrain) {
      true.B
    } else {
      val lowBitsMask = OneHot.UIntToOH1(lgSize, lgMaxSize)
      val lowerBound = ((paddr >> lgMaxSize) === (last.compare_addr >> lgMaxSize)) &&
        ((~paddr(lgMaxSize-1, 0) & last.compare_addr(lgMaxSize-1, 0)) =/= 0.U)
      val upperBound = ((paddr >> lgMaxSize) === (compare_addr >> lgMaxSize)) &&
        ((compare_addr(lgMaxSize-1, 0) & (paddr(lgMaxSize-1, 0) | lowBitsMask)) =/= 0.U)
      val torAligned = !(lowerBound || upperBound)
      val napotAligned = (lowBitsMask & ~mask(lgMaxSize-1, 0)) === 0.U
      Mux(cfg.na4_napot, napotAligned, torAligned)
    }
  }
}
241
242/** PMPEntry for outside pmp copies
243  * with one more elements mask to help napot match
244  * TODO: make mask an element, not an method, for timing opt
245  */
@chiselName
class PMPEntry(implicit p: Parameters) extends PMPBase with PMPMatchMethod {
  val mask = UInt(PMPAddrBits.W) // help to match in napot

  /** Write pmpaddr(i) given entry i+1's cfg (`next`): the write is dropped
    * when locked (directly or as a locked-tor lower bound); when it takes
    * effect, the napot mask is refreshed as a side effect.
    */
  def write_addr(next: PMPConfig, mask: UInt)(paddr: UInt) = {
    mask := Mux(!cfg.addr_locked(next), match_mask(paddr), mask)
    Mux(!cfg.addr_locked(next), paddr, addr)
  }

  /** Last-entry variant: only this entry's own lock applies. */
  def write_addr(mask: UInt)(paddr: UInt) = {
    mask := Mux(!cfg.addr_locked, match_mask(paddr), mask)
    Mux(!cfg.addr_locked, paddr, addr)
  }

  /** Connect cfg/addr/mask from existing wires or registers. */
  def gen(cfg: PMPConfig, addr: UInt, mask: UInt) = {
    require(addr.getWidth == this.addr.getWidth)
    this.cfg := cfg
    this.addr := addr
    this.mask := mask
  }
}
267
/** Builds the PMP register file and its MaskedRegMap CSR mapping. */
trait PMPMethod extends PMPConst {
  /** Reset values: all cfgs zero (every entry off); addr/mask left DontCare,
    * since an off entry never matches.
    */
  def pmp_init() : (Vec[UInt], Vec[UInt], Vec[UInt])= {
    val cfg = WireInit(0.U.asTypeOf(Vec(NumPMP/8, UInt(PMXLEN.W))))
    val addr = Wire(Vec(NumPMP, UInt((PMPAddrBits-PMPOffBits).W)))
    val mask = Wire(Vec(NumPMP, UInt(PMPAddrBits.W)))
    addr := DontCare
    mask := DontCare
    (cfg, addr, mask)
  }

  /** Declare the cfg/addr/mask registers, drive `entries` from them, and
    * return the MaskedRegMap entries for the CSR bus.
    * @param init     supplies reset values (pmp_init or pma_init)
    * @param num      number of entries
    * @param cfgBase  CSR address of the first cfg register
    * @param addrBase CSR address of the first addr register
    * @param entries  entry copies to drive from the registers
    */
  def pmp_gen_mapping
  (
    init: () => (Vec[UInt], Vec[UInt], Vec[UInt]),
    num: Int = 16,
    cfgBase: Int,
    addrBase: Int,
    entries: Vec[PMPEntry]
  ) = {
    val pmpCfgPerCSR = PMXLEN / new PMPConfig().getWidth
    // with PMXLEN = 64 the cfg CSR addresses advance by 2 per merged register
    def pmpCfgIndex(i: Int) = (PMXLEN / 32) * (i / pmpCfgPerCSR)
    val init_value = init()
    /** to fit MaskedRegMap's write, declare cfgs as Merged CSRs and split them into each pmp */
    val cfgMerged = RegInit(init_value._1) //(Vec(num / pmpCfgPerCSR, UInt(PMXLEN.W))) // RegInit(VecInit(Seq.fill(num / pmpCfgPerCSR)(0.U(PMXLEN.W))))
    val cfgs = WireInit(cfgMerged).asTypeOf(Vec(num, new PMPConfig()))
    val addr = RegInit(init_value._2) // (Vec(num, UInt((PMPAddrBits-PMPOffBits).W)))
    val mask = RegInit(init_value._3) // (Vec(num, UInt(PMPAddrBits.W)))

    for (i <- entries.indices) {
      entries(i).gen(cfgs(i), addr(i), mask(i))
    }

    val cfg_mapping = (0 until num by pmpCfgPerCSR).map(i => {Map(
      MaskedRegMap(
        addr = cfgBase + pmpCfgIndex(i),
        reg = cfgMerged(i/pmpCfgPerCSR),
        wmask = WritableMask,
        wfn = new PMPBase().write_cfg_vec(mask, addr, i)
      ))
    }).fold(Map())((a, b) => a ++ b) // ugly code, hit me if u have better codes

    // the last entry has no successor, so its addr write cannot be tor-locked by one
    val addr_mapping = (0 until num).map(i => {Map(
      MaskedRegMap(
        addr = addrBase + i,
        reg = addr(i),
        wmask = WritableMask,
        wfn = { if (i != num-1) entries(i).write_addr(entries(i+1).cfg, mask(i)) else entries(i).write_addr(mask(i)) },
        rmask = WritableMask,
        rfn = new PMPBase().read_addr(entries(i).cfg)
      ))
    }).fold(Map())((a, b) => a ++ b) // ugly code, hit me if u have better codes.

    cfg_mapping ++ addr_mapping
  }
}
322
/** PMP/PMA register file: written through the distributed CSR write channel
  * and broadcast to the checkers as entry copies.
  */
@chiselName
class PMP(implicit p: Parameters) extends PMPXSModule with HasXSParameter with PMPMethod with PMAMethod with HasCSRConst {
  val io = IO(new Bundle {
    val distribute_csr = Flipped(new DistributedCSRIO())
    val pmp = Output(Vec(NumPMP, new PMPEntry()))
    val pma = Output(Vec(NumPMA, new PMPEntry()))
  })

  val w = io.distribute_csr.w // CSR write channel (valid + addr/data)

  val pmp = Wire(Vec(NumPMP, new PMPEntry()))
  val pma = Wire(Vec(NumPMA, new PMPEntry()))

  val pmpMapping = pmp_gen_mapping(pmp_init, NumPMP, PmpcfgBase, PmpaddrBase, pmp)
  val pmaMapping = pmp_gen_mapping(pma_init, NumPMA, PmacfgBase, PmaaddrBase, pma)
  val mapping = pmpMapping ++ pmaMapping

  // rdata is required by MaskedRegMap.generate but is not connected to any output
  // here — presumably CSR reads are served by the CSR unit itself; confirm
  val rdata = Wire(UInt(PMXLEN.W))
  MaskedRegMap.generate(mapping, w.bits.addr, rdata, w.valid, w.bits.data)

  io.pmp := pmp
  io.pma := pma
}
346
/** One check request: address, log2 access size and access command.
  * Fix: both apply methods used Scala procedure syntax (`def m { }`), which is
  * deprecated since Scala 2.13 and removed in Scala 3 — rewritten as
  * `: Unit = { }` with identical behavior.
  */
class PMPReqBundle(lgMaxSize: Int = 3)(implicit p: Parameters) extends PMPBundle {
  val addr = Output(UInt(PMPAddrBits.W))
  val size = Output(UInt(log2Ceil(lgMaxSize+1).W))
  val cmd = Output(TlbCmd())

  /** Fill the request with an explicit address, size and command. */
  def apply(addr: UInt, size: UInt, cmd: UInt): Unit = {
    this.addr := addr
    this.size := size
    this.cmd := cmd
  }

  /** Address-only shorthand: read command at the maximal aligned size. */
  def apply(addr: UInt): Unit = { // req minimal permission and req align size
    apply(addr, lgMaxSize.U, TlbCmd.read)
  }

}
363
/** Check response: per-access-type fault flags plus pma attributes. */
class PMPRespBundle(implicit p: Parameters) extends PMPBundle {
  val ld = Output(Bool())
  val st = Output(Bool())
  val instr = Output(Bool())
  val mmio = Output(Bool())
  val atomic = Output(Bool())

  /** Merge two responses field by field: a flag raised on either side is
    * raised on the result.
    */
  def |(resp: PMPRespBundle): PMPRespBundle = {
    val merged = Wire(new PMPRespBundle())
    merged.atomic := resp.atomic || this.atomic
    merged.mmio := resp.mmio || this.mmio
    merged.instr := resp.instr || this.instr
    merged.st := resp.st || this.st
    merged.ld := resp.ld || this.ld
    merged
  }
}
381
/** Permission checking against the priority-matched pmp entry. */
trait PMPCheckMethod extends PMPConst {
  /** Translate the matched cfg into per-access-type fault flags.
    * An amo needs both read and write permission (it raises st, not ld, when
    * denied); mmio and atomic are pma-only attributes and stay false here.
    */
  def pmp_check(cmd: UInt, cfg: PMPConfig) = {
    val resp = Wire(new PMPRespBundle)
    resp.ld := TlbCmd.isRead(cmd) && !TlbCmd.isAmo(cmd) && !cfg.r
    resp.st := (TlbCmd.isWrite(cmd) || TlbCmd.isAmo(cmd)) && !cfg.w
    resp.instr := TlbCmd.isExec(cmd) && !cfg.x
    resp.mmio := false.B
    resp.atomic := false.B
    resp
  }

  /** Priority-select the entry matching (addr, size): entry 0 has the highest
    * priority; a default entry at index num always matches and grants
    * pass-through permissions only when mode > 1 (i.e. M mode).
    * @param leaveHitMux when true, register the per-entry hit/cfg vectors
    *        (enabled by valid) and mux one cycle later for timing
    */
  def pmp_match_res(leaveHitMux: Boolean = false, valid: Bool = true.B)(
    addr: UInt,
    size: UInt,
    pmpEntries: Vec[PMPEntry],
    mode: UInt,
    lgMaxSize: Int
  ) = {
    val num = pmpEntries.size
    require(num == NumPMP)

    val passThrough = if (pmpEntries.isEmpty) true.B else (mode > 1.U)
    val pmpDefault = WireInit(0.U.asTypeOf(new PMPEntry()))
    pmpDefault.cfg.r := passThrough
    pmpDefault.cfg.w := passThrough
    pmpDefault.cfg.x := passThrough

    val match_vec = Wire(Vec(num+1, Bool()))
    val cfg_vec = Wire(Vec(num+1, new PMPEntry()))

    // entry i's tor lower bound is entry i-1; entry 0 pairs with the default entry
    pmpEntries.zip(pmpDefault +: pmpEntries.take(num-1)).zipWithIndex.foreach{ case ((pmp, last_pmp), i) =>
      val is_match = pmp.is_match(addr, size, lgMaxSize, last_pmp)
      // in M mode an unlocked entry imposes no restriction
      val ignore = passThrough && !pmp.cfg.l
      // an access straddling the region boundary is never permitted
      val aligned = pmp.aligned(addr, size, lgMaxSize, last_pmp)

      val cur = WireInit(pmp)
      cur.cfg.r := aligned && (pmp.cfg.r || ignore)
      cur.cfg.w := aligned && (pmp.cfg.w || ignore)
      cur.cfg.x := aligned && (pmp.cfg.x || ignore)

//      Mux(is_match, cur, prev)
      match_vec(i) := is_match
      cfg_vec(i) := cur
    }

    // default value
    match_vec(num) := true.B
    cfg_vec(num) := pmpDefault

    if (leaveHitMux) {
      ParallelPriorityMux(match_vec.map(RegEnable(_, false.B, valid)), RegEnable(cfg_vec, valid))
    } else {
      ParallelPriorityMux(match_vec, cfg_vec)
    }
  }
}
438
/** Checker inputs sourced from CSR state: privilege mode plus the pmp/pma
  * entry copies.
  */
class PMPCheckerEnv(implicit p: Parameters) extends PMPBundle {
  val mode = UInt(2.W) // privilege mode of the access
  val pmp = Vec(NumPMP, new PMPEntry())
  val pma = Vec(NumPMA, new PMPEntry())

  /** Connect all environment fields at once. */
  def apply(mode: UInt, pmp: Vec[PMPEntry], pma: Vec[PMPEntry]): Unit = {
    this.mode := mode
    this.pmp := pmp
    this.pma := pma
  }
}
450
/** IO of PMPChecker: environment + request in, per-access-type faults out. */
class PMPCheckIO(lgMaxSize: Int)(implicit p: Parameters) extends PMPBundle {
  val check_env = Input(new PMPCheckerEnv())
  val req = Flipped(Valid(new PMPReqBundle(lgMaxSize))) // usage: assign the valid to fire signal
  val resp = new PMPRespBundle()

  /** Wire the environment and a full request; returns resp for chaining. */
  def apply(mode: UInt, pmp: Vec[PMPEntry], pma: Vec[PMPEntry], req: Valid[PMPReqBundle]) = {
    check_env.apply(mode, pmp, pma)
    this.req := req
    resp
  }

  /** Drive only valid + address (request uses the address-only apply). */
  def req_apply(valid: Bool, addr: UInt): Unit = {
    this.req.valid := valid
    this.req.bits.apply(addr)
  }

  /** Wire the environment and an address-only request; returns resp. */
  def apply(mode: UInt, pmp: Vec[PMPEntry], pma: Vec[PMPEntry], valid: Bool, addr: UInt) = {
    check_env.apply(mode, pmp, pma)
    req_apply(valid, addr)
    resp
  }
}
473
/** IO of PMPCheckerv2: like PMPCheckIO, but resp is the combined matched
  * PMPConfig instead of fault flags.
  */
class PMPCheckv2IO(lgMaxSize: Int)(implicit p: Parameters) extends PMPBundle {
  val check_env = Input(new PMPCheckerEnv())
  val req = Flipped(Valid(new PMPReqBundle(lgMaxSize))) // usage: assign the valid to fire signal
  val resp = Output(new PMPConfig())

  /** Wire the environment and a full request; returns resp for chaining. */
  def apply(mode: UInt, pmp: Vec[PMPEntry], pma: Vec[PMPEntry], req: Valid[PMPReqBundle]) = {
    check_env.apply(mode, pmp, pma)
    this.req := req
    resp
  }

  /** Drive only valid + address (request uses the address-only apply). */
  def req_apply(valid: Bool, addr: UInt): Unit = {
    this.req.valid := valid
    this.req.bits.apply(addr)
  }

  /** Wire the environment and an address-only request; returns resp. */
  def apply(mode: UInt, pmp: Vec[PMPEntry], pma: Vec[PMPEntry], valid: Bool, addr: UInt) = {
    check_env.apply(mode, pmp, pma)
    req_apply(valid, addr)
    resp
  }
}
496
/** Checks one access against both the pmp and pma entry copies and reports
  * per-access-type faults plus pma attributes.
  * @param lgMaxSize   log2 of the widest supported access
  * @param sameCycle   respond combinationally, skipping the output register
  * @param leaveHitMux register before the final hit mux instead of after the
  *                    permission check (mutually exclusive with sameCycle)
  * @param pmpUsed     when false only the pma result is reported
  */
@chiselName
class PMPChecker
(
  lgMaxSize: Int = 3,
  sameCycle: Boolean = false,
  leaveHitMux: Boolean = false,
  pmpUsed: Boolean = true
)(implicit p: Parameters) extends PMPModule
  with PMPCheckMethod
  with PMACheckMethod
{
  require(!(leaveHitMux && sameCycle))
  val io = IO(new PMPCheckIO(lgMaxSize))

  val req = io.req.bits

  val res_pmp = pmp_match_res(leaveHitMux, io.req.valid)(req.addr, req.size, io.check_env.pmp, io.check_env.mode, lgMaxSize)
  val res_pma = pma_match_res(leaveHitMux, io.req.valid)(req.addr, req.size, io.check_env.pma, io.check_env.mode, lgMaxSize)

  val resp_pmp = pmp_check(req.cmd, res_pmp.cfg)
  val resp_pma = pma_check(req.cmd, res_pma.cfg)
  // a fault flagged by either checker is a fault
  val resp = if (pmpUsed) (resp_pmp | resp_pma) else resp_pma

  // with leaveHitMux the match result is already registered inside
  // pmp_match_res, so the response is passed through directly
  if (sameCycle || leaveHitMux) {
    io.resp := resp
  } else {
    io.resp := RegEnable(resp, io.req.valid)
  }
}
526
527/* get config with check */
/** Variant of PMPChecker that returns the combined matched PMPConfig
  * instead of fault flags (see PMPCheckv2IO).
  * @param lgMaxSize   log2 of the widest supported access
  * @param sameCycle   respond combinationally, skipping the output register
  * @param leaveHitMux register before the final hit mux (mutually exclusive
  *                    with sameCycle)
  */
@chiselName
class PMPCheckerv2
(
  lgMaxSize: Int = 3,
  sameCycle: Boolean = false,
  leaveHitMux: Boolean = false
)(implicit p: Parameters) extends PMPModule
  with PMPCheckMethod
  with PMACheckMethod
{
  require(!(leaveHitMux && sameCycle))
  val io = IO(new PMPCheckv2IO(lgMaxSize))

  val req = io.req.bits

  val res_pmp = pmp_match_res(leaveHitMux, io.req.valid)(req.addr, req.size, io.check_env.pmp, io.check_env.mode, lgMaxSize)
  val res_pma = pma_match_res(leaveHitMux, io.req.valid)(req.addr, req.size, io.check_env.pma, io.check_env.mode, lgMaxSize)

  val resp = and(res_pmp, res_pma)

  // with leaveHitMux the match result is already registered inside
  // pmp_match_res, so the response is passed through directly
  if (sameCycle || leaveHitMux) {
    io.resp := resp
  } else {
    io.resp := RegEnable(resp, io.req.valid)
  }

  /** Combine pmp and pma results: r/w/x require both checkers to grant;
    * cacheability (c) and atomic come from the pma side only; l and a are
    * meaningless in the combined result.
    */
  def and(pmp: PMPEntry, pma: PMPEntry): PMPConfig = {
    val tmp_res = Wire(new PMPConfig)
    tmp_res.l := DontCare
    tmp_res.a := DontCare
    tmp_res.r := pmp.cfg.r && pma.cfg.r
    tmp_res.w := pmp.cfg.w && pma.cfg.w
    tmp_res.x := pmp.cfg.x && pma.cfg.x
    tmp_res.c := pma.cfg.c
    tmp_res.atomic := pma.cfg.atomic
    tmp_res
  }
}
566