/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

// See LICENSE.SiFive for license details.

package xiangshan.backend.fu

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.internal.naming.chiselName
import chisel3.util._
import utils.MaskedRegMap.WritableMask
import xiangshan._
import xiangshan.backend.fu.util.HasCSRConst
import utils._
import xiangshan.cache.mmu.{TlbCmd, TlbExceptionBundle}

trait PMPConst extends HasPMParameters {
  val PMPOffBits = 2 // minimal grain is 4 bytes
  val CoarserGrain: Boolean = PlatformGrain > PMPOffBits
}
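
// Illustrative note (values are examples, not taken from the configs in this
// repo): with PlatformGrain = 12 the granularity is 2^12 = 4 KiB, so
// CoarserGrain is true (12 > 2) and NA4 becomes unselectable; with
// PlatformGrain = 2 every 4-byte region is expressible and CoarserGrain is false.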

abstract class PMPBundle(implicit val p: Parameters) extends Bundle with PMPConst
abstract class PMPModule(implicit val p: Parameters) extends Module with PMPConst
abstract class PMPXSModule(implicit p: Parameters) extends XSModule with PMPConst

@chiselName
class PMPConfig(implicit p: Parameters) extends PMPBundle {
  val l = Bool()
  val c = Bool() // res(1), unused in pmp
  val atomic = Bool() // res(0), unused in pmp
  val a = UInt(2.W)
  val x = Bool()
  val w = Bool()
  val r = Bool()

  def res: UInt = Cat(c, atomic) // unused in pmp
  def off = a === 0.U
  def tor = a === 1.U
  def na4 = { if (CoarserGrain) false.B else a === 2.U }
  def napot = { if (CoarserGrain) a(1).asBool else a === 3.U }
  def off_tor = !a(1)
  def na4_napot = a(1)

  def locked = l
  def addr_locked: Bool = locked
  def addr_locked(next: PMPConfig): Bool = locked || (next.locked && next.tor)
}
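
// Field order above mirrors one byte of the pmpcfg CSR as defined by the
// RISC-V privileged spec (first-declared field is the MSB under asTypeOf/asUInt):
//   bit 7    : L (lock)
//   bits 6:5 : reserved -- reused here as c / atomic for the PMA copies
//   bits 4:3 : A (0 = OFF, 1 = TOR, 2 = NA4, 3 = NAPOT)
//   bits 2:0 : X, W, R
// A decoding sketch (cfgByte is a hypothetical UInt(8.W) source):
//   val decoded = cfgByte.asTypeOf(new PMPConfig)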

trait PMPReadWriteMethodBare extends PMPConst {
  def match_mask(cfg: PMPConfig, paddr: UInt) = {
    val match_mask_c_addr = Cat(paddr, cfg.a(0)) | (((1 << PlatformGrain) - 1) >> PMPOffBits).U((paddr.getWidth + 1).W)
    Cat(match_mask_c_addr & ~(match_mask_c_addr + 1.U), ((1 << PMPOffBits) - 1).U(PMPOffBits.W))
  }
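
  // Worked example (a sketch; assumes PlatformGrain = 2 so the OR term is zero):
  // a NAPOT entry whose stored addr ends in k ones encodes a 2^(k+3)-byte region.
  // Cat(paddr, cfg.a(0)) appends one more 1, and x & ~(x + 1.U) keeps exactly the
  // trailing-ones run; re-appending PMPOffBits ones gives mask = regionSize - 1.
  // E.g. stored addr 0b...y0111 (k = 3) yields mask = 0b111111 = 63 for a 64-byte
  // region, so (a & ~mask) === (b & ~mask) tests membership (see unmaskEqual below).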

  def write_cfg_vec(mask: Vec[UInt], addr: Vec[UInt], index: Int)(cfgs: UInt): UInt = {
    val cfgVec = Wire(Vec(cfgs.getWidth/8, new PMPConfig))
    for (i <- cfgVec.indices) {
      val cfg_w_m_tmp = cfgs((i+1)*8-1, i*8).asUInt.asTypeOf(new PMPConfig)
      cfgVec(i) := cfg_w_m_tmp
      when (!cfg_w_m_tmp.l) {
        cfgVec(i).w := cfg_w_m_tmp.w && cfg_w_m_tmp.r
        if (CoarserGrain) { cfgVec(i).a := Cat(cfg_w_m_tmp.a(1), cfg_w_m_tmp.a.orR) }
        when (cfgVec(i).na4_napot) {
          mask(index + i) := match_mask(cfgVec(i), addr(index + i))
        }
      }
    }
    cfgVec.asUInt
  }
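
  // Behavioral example (a sketch of the legalization above): writing a byte with
  // W = 1, R = 0 to an unlocked entry stores W = 0, because R = 0 && W = 1 is a
  // reserved combination; writing A = NA4 (0b10) when CoarserGrain holds stores
  // Cat(a(1), a.orR) = 0b11, i.e. the write is promoted to NAPOT, since NA4 is
  // not expressible at a grain coarser than 4 bytes.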

  def read_addr(cfg: PMPConfig)(addr: UInt): UInt = {
    val G = PlatformGrain - PMPOffBits
    require(G >= 0)
    if (G == 0) {
      addr
    } else if (G >= 2) {
      Mux(cfg.na4_napot, set_low_bits(addr, G-1), clear_low_bits(addr, G))
    } else { // G is 1
      Mux(cfg.off_tor, clear_low_bits(addr, G), addr)
    }
  }
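
  // Readback example (a sketch, assuming PlatformGrain = 10, hence G = 8): a
  // NAPOT entry reads back with addr(G-2, 0) forced to all ones and an OFF/TOR
  // entry with addr(G-1, 0) forced to all zeros, matching the pmpaddr readback
  // rules of the privileged spec:
  //   read_addr(cfg)(addr) // === set_low_bits(addr, 7) when cfg is NAPOT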

  def write_addr(next: PMPConfig, mask: UInt)(paddr: UInt, cfg: PMPConfig, addr: UInt): UInt = {
    val locked = cfg.addr_locked(next)
    mask := Mux(!locked, match_mask(cfg, paddr), mask)
    Mux(!locked, paddr, addr)
  }

  def set_low_bits(data: UInt, num: Int): UInt = {
    require(num >= 0)
    data | ((1 << num)-1).U
  }

  /** clear the data's low num bits (lsb) */
  def clear_low_bits(data: UInt, num: Int): UInt = {
    require(num >= 0)
    // use Cat instead of AND-ing with a mask to avoid width-inference problems
    if (num == 0) { data }
    else { Cat(data(data.getWidth-1, num), 0.U(num.W)) }
  }
}

trait PMPReadWriteMethod extends PMPReadWriteMethodBare { this: PMPBase =>
  def write_cfg_vec(cfgs: UInt): UInt = {
    val cfgVec = Wire(Vec(cfgs.getWidth/8, new PMPConfig))
    for (i <- cfgVec.indices) {
      val cfg_w_tmp = cfgs((i+1)*8-1, i*8).asUInt.asTypeOf(new PMPConfig)
      cfgVec(i) := cfg_w_tmp
      when (!cfg_w_tmp.l) {
        cfgVec(i).w := cfg_w_tmp.w && cfg_w_tmp.r
        if (CoarserGrain) { cfgVec(i).a := Cat(cfg_w_tmp.a(1), cfg_w_tmp.a.orR) }
      }
    }
    cfgVec.asUInt
  }

  /** In general, the PMP grain is 2**(G+2) bytes. When G >= 1, NA4 is not selectable.
   * When G >= 2 and cfg.a(1) is set (NAPOT mode), the bits addr(G-2, 0) read as all ones.
   * When G >= 1 and cfg.a(1) is clear (OFF or TOR mode), the bits addr(G-1, 0) read as all zeros.
   * The low PMPOffBits bits are not stored.
   */
  def read_addr(): UInt = {
    read_addr(cfg)(addr)
  }

  /** addr: the internally stored address, with the low PMPOffBits dropped.
   * compare_addr: the shifted-back internal address used for comparison.
   * paddr: the external physical address.
   */
  def write_addr(next: PMPConfig)(paddr: UInt): UInt = {
    Mux(!cfg.addr_locked(next), paddr, addr)
  }
  def write_addr(paddr: UInt): UInt = {
    Mux(!cfg.addr_locked, paddr, addr)
  }
}
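
// Lock-chain note (behavioral example, no new logic): per the privileged spec,
// pmpaddr(i) is also write-protected when entry i+1 is locked in TOR mode,
// because entry i+1 uses pmpaddr(i) as its lower bound. addr_locked(next)
// encodes exactly that:
//   cfg.addr_locked(nextCfg) // true if cfg.l, or nextCfg.l && nextCfg.tor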

/** PMPBase for the CSR unit,
  * with only read and write logic
  */
@chiselName
class PMPBase(implicit p: Parameters) extends PMPBundle with PMPReadWriteMethod {
  val cfg = new PMPConfig
  val addr = UInt((PMPAddrBits - PMPOffBits).W)

  def gen(cfg: PMPConfig, addr: UInt) = {
    require(addr.getWidth == this.addr.getWidth)
    this.cfg := cfg
    this.addr := addr
  }
}

trait PMPMatchMethod extends PMPConst { this: PMPEntry =>
  /** compare_addr is used to compare against the input address */
  def compare_addr: UInt = ((addr << PMPOffBits) & ~(((1 << PlatformGrain) - 1).U(PMPAddrBits.W))).asUInt
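
  // E.g. (illustrative values, PMPOffBits = 2 and PlatformGrain = 12): a stored
  // addr of 0x20001 corresponds to byte address 0x80004, which compare_addr
  // aligns down to the 4 KiB grain, giving 0x80000.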

  /** size and maxSize are both log2 of the access size.
   * For dtlb, maxSize is bPMXLEN, which is 8.
   * For itlb and ptw, maxSize is log2(512)?
   * But we may only need 64 bytes; how do we prevent bugs there?
   * TODO: handle the special case where itlb, ptw and dcache access a wider size than PMXLEN
   */
  def is_match(paddr: UInt, lgSize: UInt, lgMaxSize: Int, last_pmp: PMPEntry): Bool = {
    Mux(cfg.na4_napot, napotMatch(paddr, lgSize, lgMaxSize),
      Mux(cfg.tor, torMatch(paddr, lgSize, lgMaxSize, last_pmp), false.B))
  }

  /** generate the match mask used for NAPOT-mode matching */
  def match_mask(paddr: UInt): UInt = {
    match_mask(cfg, paddr)
  }

  def boundMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int): Bool = {
    if (lgMaxSize <= PlatformGrain) {
      (paddr < compare_addr)
    } else {
      val highLess = (paddr >> lgMaxSize) < (compare_addr >> lgMaxSize)
      val highEqual = (paddr >> lgMaxSize) === (compare_addr >> lgMaxSize)
      val lowLess = (paddr(lgMaxSize-1, 0) | OneHot.UIntToOH1(lgSize, lgMaxSize)) < compare_addr(lgMaxSize-1, 0)
      highLess || (highEqual && lowLess)
    }
  }

  def lowerBoundMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int): Bool = {
    !boundMatch(paddr, lgSize, lgMaxSize)
  }

  def higherBoundMatch(paddr: UInt, lgMaxSize: Int) = {
    boundMatch(paddr, 0.U, lgMaxSize)
  }

  def torMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int, last_pmp: PMPEntry): Bool = {
    last_pmp.lowerBoundMatch(paddr, lgSize, lgMaxSize) && higherBoundMatch(paddr, lgMaxSize)
  }

  def unmaskEqual(a: UInt, b: UInt, m: UInt) = {
    (a & ~m) === (b & ~m)
  }

  def napotMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int) = {
    if (lgMaxSize <= PlatformGrain) {
      unmaskEqual(paddr, compare_addr, mask)
    } else {
      val lowMask = mask | OneHot.UIntToOH1(lgSize, lgMaxSize)
      val highMatch = unmaskEqual(paddr >> lgMaxSize, compare_addr >> lgMaxSize, mask >> lgMaxSize)
      val lowMatch = unmaskEqual(paddr(lgMaxSize-1, 0), compare_addr(lgMaxSize-1, 0), lowMask(lgMaxSize-1, 0))
      highMatch && lowMatch
    }
  }

  def aligned(paddr: UInt, lgSize: UInt, lgMaxSize: Int, last: PMPEntry) = {
    if (lgMaxSize <= PlatformGrain) {
      true.B
    } else {
      val lowBitsMask = OneHot.UIntToOH1(lgSize, lgMaxSize)
      val lowerBound = ((paddr >> lgMaxSize) === (last.compare_addr >> lgMaxSize)) &&
        ((~paddr(lgMaxSize-1, 0) & last.compare_addr(lgMaxSize-1, 0)) =/= 0.U)
      val upperBound = ((paddr >> lgMaxSize) === (compare_addr >> lgMaxSize)) &&
        ((compare_addr(lgMaxSize-1, 0) & (paddr(lgMaxSize-1, 0) | lowBitsMask)) =/= 0.U)
      val torAligned = !(lowerBound || upperBound)
      val napotAligned = (lowBitsMask & ~mask(lgMaxSize-1, 0)) === 0.U
      Mux(cfg.na4_napot, napotAligned, torAligned)
    }
  }
}
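
// Matching note (behavioral example): a TOR entry i covers the half-open byte
// range [pmp(i-1).compare_addr, pmp(i).compare_addr); torMatch checks that the
// access lies at or above the lower bound and below the upper bound, while
// aligned() additionally rejects accesses that straddle a bound (TOR) or spill
// out of the region (NAPOT) when the access may be wider than the grain.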

/** PMPEntry for the pmp copies distributed outside the CSR unit,
  * with one extra element, mask, to speed up NAPOT matching
  * TODO: make mask an element, not a method, for timing optimization
  */
@chiselName
class PMPEntry(implicit p: Parameters) extends PMPBase with PMPMatchMethod {
  val mask = UInt(PMPAddrBits.W) // helps matching in NAPOT mode

  def write_addr(next: PMPConfig, mask: UInt)(paddr: UInt) = {
    mask := Mux(!cfg.addr_locked(next), match_mask(paddr), mask)
    Mux(!cfg.addr_locked(next), paddr, addr)
  }

  def write_addr(mask: UInt)(paddr: UInt) = {
    mask := Mux(!cfg.addr_locked, match_mask(paddr), mask)
    Mux(!cfg.addr_locked, paddr, addr)
  }

  def gen(cfg: PMPConfig, addr: UInt, mask: UInt) = {
    require(addr.getWidth == this.addr.getWidth)
    this.cfg := cfg
    this.addr := addr
    this.mask := mask
  }
}

trait PMPMethod extends PMPConst {
  def pmp_init(): (Vec[UInt], Vec[UInt], Vec[UInt]) = {
    val cfg = WireInit(0.U.asTypeOf(Vec(NumPMP/8, UInt(PMXLEN.W))))
    val addr = Wire(Vec(NumPMP, UInt((PMPAddrBits-PMPOffBits).W)))
    val mask = Wire(Vec(NumPMP, UInt(PMPAddrBits.W)))
    addr := DontCare
    mask := DontCare
    (cfg, addr, mask)
  }

  def pmp_gen_mapping
  (
    init: () => (Vec[UInt], Vec[UInt], Vec[UInt]),
    num: Int = 16,
    cfgBase: Int,
    addrBase: Int,
    entries: Vec[PMPEntry]
  ) = {
    val pmpCfgPerCSR = PMXLEN / new PMPConfig().getWidth
    def pmpCfgIndex(i: Int) = (PMXLEN / 32) * (i / pmpCfgPerCSR)
    val init_value = init()
    /** to fit MaskedRegMap's write, declare cfgs as merged CSRs and split them into each pmp */
    val cfgMerged = RegInit(init_value._1) // Vec(num / pmpCfgPerCSR, UInt(PMXLEN.W))
    val cfgs = WireInit(cfgMerged).asTypeOf(Vec(num, new PMPConfig()))
    val addr = RegInit(init_value._2) // Vec(num, UInt((PMPAddrBits-PMPOffBits).W))
    val mask = RegInit(init_value._3) // Vec(num, UInt(PMPAddrBits.W))

    for (i <- entries.indices) {
      entries(i).gen(cfgs(i), addr(i), mask(i))
    }

    val cfg_mapping = (0 until num by pmpCfgPerCSR).map(i => {Map(
      MaskedRegMap(
        addr = cfgBase + pmpCfgIndex(i),
        reg = cfgMerged(i/pmpCfgPerCSR),
        wmask = WritableMask,
        wfn = new PMPBase().write_cfg_vec(mask, addr, i)
      ))
    }).fold(Map())((a, b) => a ++ b) // ugly code; improve it if you have a better way

    val addr_mapping = (0 until num).map(i => {Map(
      MaskedRegMap(
        addr = addrBase + i,
        reg = addr(i),
        wmask = WritableMask,
        wfn = { if (i != num-1) entries(i).write_addr(entries(i+1).cfg, mask(i)) else entries(i).write_addr(mask(i)) },
        rmask = WritableMask,
        rfn = new PMPBase().read_addr(entries(i).cfg)
      ))
    }).fold(Map())((a, b) => a ++ b) // ugly code; improve it if you have a better way

    cfg_mapping ++ addr_mapping
  }
}

@chiselName
class PMP(implicit p: Parameters) extends PMPXSModule with HasXSParameter with PMPMethod with PMAMethod with HasCSRConst {
  val io = IO(new Bundle {
    val distribute_csr = Flipped(new DistributedCSRIO())
    val pmp = Output(Vec(NumPMP, new PMPEntry()))
    val pma = Output(Vec(NumPMA, new PMPEntry()))
  })

  val w = io.distribute_csr.w

  val pmp = Wire(Vec(NumPMP, new PMPEntry()))
  val pma = Wire(Vec(NumPMA, new PMPEntry()))

  val pmpMapping = pmp_gen_mapping(pmp_init, NumPMP, PmpcfgBase, PmpaddrBase, pmp)
  val pmaMapping = pmp_gen_mapping(pma_init, NumPMA, PmacfgBase, PmaaddrBase, pma)
  val mapping = pmpMapping ++ pmaMapping

  val rdata = Wire(UInt(PMXLEN.W))
  MaskedRegMap.generate(mapping, w.bits.addr, rdata, w.valid, w.bits.data)

  io.pmp := pmp
  io.pma := pma
}

class PMPReqBundle(lgMaxSize: Int = 3)(implicit p: Parameters) extends PMPBundle {
  val addr = Output(UInt(PMPAddrBits.W))
  val size = Output(UInt(log2Ceil(lgMaxSize+1).W))
  val cmd = Output(TlbCmd())

  def apply(addr: UInt, size: UInt, cmd: UInt): Unit = {
    this.addr := addr
    this.size := size
    this.cmd := cmd
  }

  def apply(addr: UInt): Unit = { // request minimal permission (read) at the aligned max size
    apply(addr, lgMaxSize.U, TlbCmd.read)
  }
}

class PMPRespBundle(implicit p: Parameters) extends PMPBundle {
  val ld = Output(Bool())
  val st = Output(Bool())
  val instr = Output(Bool())
  val mmio = Output(Bool())

  def |(resp: PMPRespBundle): PMPRespBundle = {
    val res = Wire(new PMPRespBundle())
    res.ld := this.ld || resp.ld
    res.st := this.st || resp.st
    res.instr := this.instr || resp.instr
    res.mmio := this.mmio || resp.mmio
    res
  }
}

trait PMPCheckMethod extends PMPConst {
  def pmp_check(cmd: UInt, cfg: PMPConfig) = {
    val resp = Wire(new PMPRespBundle)
    resp.ld := TlbCmd.isRead(cmd) && !TlbCmd.isAtom(cmd) && !cfg.r
    resp.st := (TlbCmd.isWrite(cmd) || TlbCmd.isAtom(cmd)) && !cfg.w
    resp.instr := TlbCmd.isExec(cmd) && !cfg.x
    resp.mmio := false.B
    resp
  }

  def pmp_match_res(leaveHitMux: Boolean = false, valid: Bool = true.B)(
    addr: UInt,
    size: UInt,
    pmpEntries: Vec[PMPEntry],
    mode: UInt,
    lgMaxSize: Int
  ) = {
    val num = pmpEntries.size
    require(num == NumPMP)

    val passThrough = if (pmpEntries.isEmpty) true.B else (mode > 1.U)
    val pmpDefault = WireInit(0.U.asTypeOf(new PMPEntry()))
    pmpDefault.cfg.r := passThrough
    pmpDefault.cfg.w := passThrough
    pmpDefault.cfg.x := passThrough

    val match_vec = Wire(Vec(num+1, Bool()))
    val cfg_vec = Wire(Vec(num+1, new PMPEntry()))

    pmpEntries.zip(pmpDefault +: pmpEntries.take(num-1)).zipWithIndex.foreach{ case ((pmp, last_pmp), i) =>
      val is_match = pmp.is_match(addr, size, lgMaxSize, last_pmp)
      val ignore = passThrough && !pmp.cfg.l
      val aligned = pmp.aligned(addr, size, lgMaxSize, last_pmp)

      val cur = WireInit(pmp)
      cur.cfg.r := aligned && (pmp.cfg.r || ignore)
      cur.cfg.w := aligned && (pmp.cfg.w || ignore)
      cur.cfg.x := aligned && (pmp.cfg.x || ignore)

      // effectively Mux(is_match, cur, prev), folded by the priority mux below
      match_vec(i) := is_match
      cfg_vec(i) := cur
    }

    // default value
    match_vec(num) := true.B
    cfg_vec(num) := pmpDefault

    if (leaveHitMux) {
      ParallelPriorityMux(match_vec.map(RegEnable(_, init = false.B, valid)), RegEnable(cfg_vec, valid))
    } else {
      ParallelPriorityMux(match_vec, cfg_vec)
    }
  }
}
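
// Priority note (behavioral example): ParallelPriorityMux returns the
// lowest-numbered matching entry, as the PMP spec requires. If entries 2 and 5
// both match, entry 2's permissions win; if no entry matches, the appended
// default entry applies, granting r/w/x only when passThrough holds (i.e. in
// machine mode, mode > 1).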

class PMPCheckerEnv(implicit p: Parameters) extends PMPBundle {
  val mode = UInt(2.W)
  val pmp = Vec(NumPMP, new PMPEntry())
  val pma = Vec(NumPMA, new PMPEntry())

  def apply(mode: UInt, pmp: Vec[PMPEntry], pma: Vec[PMPEntry]): Unit = {
    this.mode := mode
    this.pmp := pmp
    this.pma := pma
  }
}

class PMPCheckIO(lgMaxSize: Int)(implicit p: Parameters) extends PMPBundle {
  val check_env = Input(new PMPCheckerEnv())
  val req = Flipped(Valid(new PMPReqBundle(lgMaxSize))) // usage: drive valid with the request's fire signal
  val resp = new PMPRespBundle()

  def apply(mode: UInt, pmp: Vec[PMPEntry], pma: Vec[PMPEntry], req: Valid[PMPReqBundle]) = {
    check_env.apply(mode, pmp, pma)
    this.req := req
    resp
  }

  def req_apply(valid: Bool, addr: UInt): Unit = {
    this.req.valid := valid
    this.req.bits.apply(addr)
  }

  def apply(mode: UInt, pmp: Vec[PMPEntry], pma: Vec[PMPEntry], valid: Bool, addr: UInt) = {
    check_env.apply(mode, pmp, pma)
    req_apply(valid, addr)
    resp
  }
}

class PMPCheckv2IO(lgMaxSize: Int)(implicit p: Parameters) extends PMPBundle {
  val check_env = Input(new PMPCheckerEnv())
  val req = Flipped(Valid(new PMPReqBundle(lgMaxSize))) // usage: drive valid with the request's fire signal
  val resp = Output(new PMPConfig())

  def apply(mode: UInt, pmp: Vec[PMPEntry], pma: Vec[PMPEntry], req: Valid[PMPReqBundle]) = {
    check_env.apply(mode, pmp, pma)
    this.req := req
    resp
  }

  def req_apply(valid: Bool, addr: UInt): Unit = {
    this.req.valid := valid
    this.req.bits.apply(addr)
  }

  def apply(mode: UInt, pmp: Vec[PMPEntry], pma: Vec[PMPEntry], valid: Bool, addr: UInt) = {
    check_env.apply(mode, pmp, pma)
    req_apply(valid, addr)
    resp
  }
}

@chiselName
class PMPChecker
(
  lgMaxSize: Int = 3,
  sameCycle: Boolean = false,
  leaveHitMux: Boolean = false,
  pmpUsed: Boolean = true
)(implicit p: Parameters) extends PMPModule
  with PMPCheckMethod
  with PMACheckMethod
{
  require(!(leaveHitMux && sameCycle))
  val io = IO(new PMPCheckIO(lgMaxSize))

  val req = io.req.bits

  val res_pmp = pmp_match_res(leaveHitMux, io.req.valid)(req.addr, req.size, io.check_env.pmp, io.check_env.mode, lgMaxSize)
  val res_pma = pma_match_res(leaveHitMux, io.req.valid)(req.addr, req.size, io.check_env.pma, io.check_env.mode, lgMaxSize)

  val resp_pmp = pmp_check(req.cmd, res_pmp.cfg)
  val resp_pma = pma_check(req.cmd, res_pma.cfg)
  val resp = if (pmpUsed) (resp_pmp | resp_pma) else resp_pma

  if (sameCycle || leaveHitMux) {
    io.resp := resp
  } else {
    io.resp := RegEnable(resp, io.req.valid)
  }
}
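
/* Usage sketch (hypothetical wiring, for illustration only; csrCtrl, mode,
 * reqValid and reqPaddr are assumed to exist in the surrounding design):
 *
 *   val pmp = Module(new PMP())
 *   val checker = Module(new PMPChecker(lgMaxSize = 3, sameCycle = true))
 *   pmp.io.distribute_csr <> csrCtrl.distribute_csr
 *   val resp = checker.io.apply(mode, pmp.io.pmp, pmp.io.pma, reqValid, reqPaddr)
 *   val loadAccessFault = resp.ld // resp.st / resp.instr / resp.mmio likewise
 */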

/* returns the combined PMP/PMA config after checking */
@chiselName
class PMPCheckerv2
(
  lgMaxSize: Int = 3,
  sameCycle: Boolean = false,
  leaveHitMux: Boolean = false
)(implicit p: Parameters) extends PMPModule
  with PMPCheckMethod
  with PMACheckMethod
{
  require(!(leaveHitMux && sameCycle))
  val io = IO(new PMPCheckv2IO(lgMaxSize))

  val req = io.req.bits

  val res_pmp = pmp_match_res(leaveHitMux, io.req.valid)(req.addr, req.size, io.check_env.pmp, io.check_env.mode, lgMaxSize)
  val res_pma = pma_match_res(leaveHitMux, io.req.valid)(req.addr, req.size, io.check_env.pma, io.check_env.mode, lgMaxSize)

  val resp = and(res_pmp, res_pma)

  if (sameCycle || leaveHitMux) {
    io.resp := resp
  } else {
    io.resp := RegEnable(resp, io.req.valid)
  }

  def and(pmp: PMPEntry, pma: PMPEntry): PMPConfig = {
    val tmp_res = Wire(new PMPConfig)
    tmp_res.l := DontCare
    tmp_res.a := DontCare
    tmp_res.r := pmp.cfg.r && pma.cfg.r
    tmp_res.w := pmp.cfg.w && pma.cfg.w
    tmp_res.x := pmp.cfg.x && pma.cfg.x
    tmp_res.c := pma.cfg.c
    tmp_res.atomic := pma.cfg.atomic
    tmp_res
  }
}