/***************************************************************************************
* Copyright (c) 2024 Beijing Institute of Open Source Chip (BOSC)
* Copyright (c) 2020-2024 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

// See LICENSE.SiFive for license details.

package xiangshan.backend.fu

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import utility.MaskedRegMap.WritableMask
import xiangshan._
import xiangshan.backend.fu.util.HasCSRConst
import utils._
import utility._
import xiangshan.cache.mmu.{TlbCmd, TlbExceptionBundle}

trait PMPConst extends HasPMParameters {
  val PMPOffBits = 2 // minimum 4 bytes
  val CoarserGrain: Boolean = PlatformGrain > PMPOffBits
}
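
// A worked note on the grain (a sketch, assuming the common configuration
// PlatformGrain = 12, i.e. a 4 KiB grain): PMPOffBits covers the 2 address bits
// implied by the minimum 4-byte region, and CoarserGrain = (12 > 2) = true, so
// NA4 is not selectable and writes of a = 2 (NA4) are coerced to NAPOT in
// write_cfg_vec below.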

abstract class PMPBundle(implicit val p: Parameters) extends Bundle with PMPConst
abstract class PMPModule(implicit val p: Parameters) extends Module with PMPConst
abstract class PMPXSModule(implicit p: Parameters) extends XSModule with PMPConst

class PMPConfig(implicit p: Parameters) extends PMPBundle {
  val l = Bool()
  val c = Bool() // res(1), unused in PMP
  val atomic = Bool() // res(0), unused in PMP
  val a = UInt(2.W)
  val x = Bool()
  val w = Bool()
  val r = Bool()

  def res: UInt = Cat(c, atomic) // unused in PMP
  def off = a === 0.U
  def tor = a === 1.U
  def na4 = { if (CoarserGrain) false.B else a === 2.U }
  def napot = { if (CoarserGrain) a(1).asBool else a === 3.U }
  def off_tor = !a(1)
  def na4_napot = a(1)

  def locked = l
  def addr_locked: Bool = locked
  def addr_locked(next: PMPConfig): Bool = locked || (next.locked && next.tor)
}
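
// Example decode of one 8-bit pmpcfg field (hypothetical value; bits 6:5 are the
// spec's reserved bits, reused here as the PMA-only c/atomic bits):
//   0x9F = 1001_1111b -> l = 1, c = 0, atomic = 0, a = 3 (NAPOT), x = 1, w = 1, r = 1
//   i.e. a locked NAPOT entry granting read/write/execute.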

object PMPConfigUInt {
  def apply(
    l: Boolean = false,
    c: Boolean = false,
    atomic: Boolean = false,
    a: Int = 0,
    x: Boolean = false,
    w: Boolean = false,
    r: Boolean = false)(implicit p: Parameters): UInt = {
    var config = 0
    if (l) { config += (1 << 7) }
    if (c) { config += (1 << 6) }
    if (atomic) { config += (1 << 5) }
    if (a > 0) { config += (a << 3) }
    if (x) { config += (1 << 2) }
    if (w) { config += (1 << 1) }
    if (r) { config += (1 << 0) }
    config.U(8.W)
  }
}
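
// Usage sketch for PMPConfigUInt (hypothetical values):
//   PMPConfigUInt(a = 3, x = true, w = true, r = true) = ((3 << 3) | 7).U = 0x1F.U,
//   an unlocked NAPOT entry with full permissions; PMPConfigUInt() = 0x00.U (OFF).
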
trait PMPReadWriteMethodBare extends PMPConst {
  def match_mask(cfg: PMPConfig, paddr: UInt) = {
    val match_mask_c_addr = Cat(paddr, cfg.a(0)) | (((1 << PlatformGrain) - 1) >> PMPOffBits).U((paddr.getWidth + 1).W)
    Cat(match_mask_c_addr & ~(match_mask_c_addr + 1.U), ((1 << PMPOffBits) - 1).U(PMPOffBits.W))
  }
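
  // Worked example for match_mask (a sketch, assuming PlatformGrain = 12 and a
  // NAPOT write of pmpaddr = 0x2000_01FF, i.e. a 4 KiB region at 0x8000_0000):
  //   Cat(paddr, a(0)) = 0x4000_03FF; OR-in ((1 << 12) - 1) >> 2 = 0x3FF -> 0x4000_03FF
  //   x & ~(x + 1)     = 0x3FF (extracts the trailing ones)
  //   Cat(0x3FF, 0b11) = 0xFFF, a mask covering the region's low 12 address bits.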

  def write_cfg_vec(mask: Vec[UInt], addr: Vec[UInt], index: Int, oldcfg: UInt)(cfgs: UInt): UInt = {
    val cfgVec = Wire(Vec(cfgs.getWidth/8, new PMPConfig))
    for (i <- cfgVec.indices) {
      val cfg_w_m_tmp = cfgs((i+1)*8-1, i*8).asUInt.asTypeOf(new PMPConfig)
      val cfg_old_tmp = oldcfg((i+1)*8-1, i*8).asUInt.asTypeOf(new PMPConfig)
      cfgVec(i) := cfg_old_tmp
      when (!cfg_old_tmp.l) {
        cfgVec(i) := cfg_w_m_tmp
        cfgVec(i).w := cfg_w_m_tmp.w && cfg_w_m_tmp.r
        if (CoarserGrain) { cfgVec(i).a := Cat(cfg_w_m_tmp.a(1), cfg_w_m_tmp.a.orR) }
        when (cfgVec(i).na4_napot) {
          mask(index + i) := match_mask(cfgVec(i), addr(index + i))
        }
      }
    }
    cfgVec.asUInt
  }

  def read_addr(cfg: PMPConfig)(addr: UInt): UInt = {
    val G = PlatformGrain - PMPOffBits
    require(G >= 0)
    if (G == 0) {
      addr
    } else if (G >= 2) {
      Mux(cfg.na4_napot, set_low_bits(addr, G-1), clear_low_bits(addr, G))
    } else { // G is 1
      Mux(cfg.off_tor, clear_low_bits(addr, G), addr)
    }
  }
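
  // Worked example for read_addr (assuming PlatformGrain = 12, so G = 10 >= 2):
  // a NAPOT entry reads back with bits (G-2, 0) = (8, 0) forced to ones, while an
  // OFF/TOR entry reads back with bits (G-1, 0) = (9, 0) forced to zeros; this is
  // how software discovers the platform grain per the privileged spec.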

  def write_addr(next: PMPConfig, mask: UInt)(paddr: UInt, cfg: PMPConfig, addr: UInt): UInt = {
    val locked = cfg.addr_locked(next)
    mask := Mux(!locked, match_mask(cfg, paddr), mask)
    Mux(!locked, paddr, addr)
  }

  def set_low_bits(data: UInt, num: Int): UInt = {
    require(num >= 0)
    data | ((1 << num)-1).U
  }

  /** clear (zero) the data's low num bits (lsb) */
  def clear_low_bits(data: UInt, num: Int): UInt = {
    require(num >= 0)
    // use Cat instead of & with mask to avoid "Signal Width" problem
    if (num == 0) { data }
    else { Cat(data(data.getWidth-1, num), 0.U(num.W)) }
  }
}

trait PMPReadWriteMethod extends PMPReadWriteMethodBare { this: PMPBase =>
  def write_cfg_vec(oldcfg: UInt)(cfgs: UInt): UInt = {
    val cfgVec = Wire(Vec(cfgs.getWidth/8, new PMPConfig))
    for (i <- cfgVec.indices) {
      val cfg_w_tmp = cfgs((i+1)*8-1, i*8).asUInt.asTypeOf(new PMPConfig)
      val cfg_old_tmp = oldcfg((i+1)*8-1, i*8).asUInt.asTypeOf(new PMPConfig)
      cfgVec(i) := cfg_old_tmp
      when (!cfg_old_tmp.l) {
        cfgVec(i) := cfg_w_tmp
        cfgVec(i).w := cfg_w_tmp.w && cfg_w_tmp.r
        if (CoarserGrain) { cfgVec(i).a := Cat(cfg_w_tmp.a(1), cfg_w_tmp.a.orR) }
      }
    }
    cfgVec.asUInt
  }

  /** In general, the PMP grain is 2**(G+2) bytes. When G >= 1, NA4 is not selectable.
   * When G >= 2 and cfg.a(1) is set (NAPOT mode), bits addr(G-2, 0) read as all ones.
   * When G >= 1 and cfg.a(1) is clear (OFF or TOR mode), bits addr(G-1, 0) read as zeros.
   * The low OffBits are dropped.
   */
  def read_addr(): UInt = {
    read_addr(cfg)(addr)
  }

  /** addr is the internally stored address, with the low OffBits dropped.
   * compare_addr is the internal address reconstructed for comparison.
   * paddr is the external (physical) address.
   */
  def write_addr(next: PMPConfig)(paddr: UInt): UInt = {
    Mux(!cfg.addr_locked(next), paddr, addr)
  }
  def write_addr(paddr: UInt): UInt = {
    Mux(!cfg.addr_locked, paddr, addr)
  }
}

/** PMPBase for the CSR unit,
  * with only read and write logic
  */
class PMPBase(implicit p: Parameters) extends PMPBundle with PMPReadWriteMethod {
  val cfg = new PMPConfig
  val addr = UInt((PMPAddrBits - PMPOffBits).W)

  def gen(cfg: PMPConfig, addr: UInt) = {
    require(addr.getWidth == this.addr.getWidth)
    this.cfg := cfg
    this.addr := addr
  }
}

trait PMPMatchMethod extends PMPConst { this: PMPEntry =>
  /** compare_addr is used to compare with the input addr */
  def compare_addr: UInt = ((addr << PMPOffBits) & ~(((1 << PlatformGrain) - 1).U(PMPAddrBits.W))).asUInt
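
  // Worked example (hypothetical values, PlatformGrain = 12): a stored addr of
  // 0x2000_01FF gives compare_addr = (0x2000_01FF << 2) & ~0xFFF = 0x8000_0000,
  // i.e. the region base with the grain's low bits cleared.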

  /** lgSize and lgMaxSize are both log2 of the access size.
   * For the dtlb, the max size is PMXLEN, which is 8 bytes.
   * For the itlb and ptw, the max size is log2(512)?
   * But we may only need 64 bytes? How do we prevent bugs there?
   * TODO: handle the special case where itlb & ptw & dcache access a wider size than PMXLEN
   */
  def is_match(paddr: UInt, lgSize: UInt, lgMaxSize: Int, last_pmp: PMPEntry): Bool = {
    Mux(cfg.na4_napot, napotMatch(paddr, lgSize, lgMaxSize),
      Mux(cfg.tor, torMatch(paddr, lgSize, lgMaxSize, last_pmp), false.B))
  }

  /** generate the match mask to help matching in NAPOT mode */
  def match_mask(paddr: UInt): UInt = {
    match_mask(cfg, paddr)
  }

  def boundMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int): Bool = {
    if (lgMaxSize <= PlatformGrain) {
      (paddr < compare_addr)
    } else {
      val highLess = (paddr >> lgMaxSize) < (compare_addr >> lgMaxSize)
      val highEqual = (paddr >> lgMaxSize) === (compare_addr >> lgMaxSize)
      val lowLess = (paddr(lgMaxSize-1, 0) | OneHot.UIntToOH1(lgSize, lgMaxSize)) < compare_addr(lgMaxSize-1, 0)
      highLess || (highEqual && lowLess)
    }
  }

  def lowerBoundMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int): Bool = {
    !boundMatch(paddr, lgSize, lgMaxSize)
  }

  def higherBoundMatch(paddr: UInt, lgMaxSize: Int) = {
    boundMatch(paddr, 0.U, lgMaxSize)
  }

  def torMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int, last_pmp: PMPEntry): Bool = {
    last_pmp.lowerBoundMatch(paddr, lgSize, lgMaxSize) && higherBoundMatch(paddr, lgMaxSize)
  }

  def unmaskEqual(a: UInt, b: UInt, m: UInt) = {
    (a & ~m) === (b & ~m)
  }

  def napotMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int) = {
    if (lgMaxSize <= PlatformGrain) {
      unmaskEqual(paddr, compare_addr, mask)
    } else {
      val lowMask = mask | OneHot.UIntToOH1(lgSize, lgMaxSize)
      val highMatch = unmaskEqual(paddr >> lgMaxSize, compare_addr >> lgMaxSize, mask >> lgMaxSize)
      val lowMatch = unmaskEqual(paddr(lgMaxSize-1, 0), compare_addr(lgMaxSize-1, 0), lowMask(lgMaxSize-1, 0))
      highMatch && lowMatch
    }
  }
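
  // Worked example for napotMatch (hypothetical values): with compare_addr =
  // 0x8000_0000 and mask = 0xFFF (a 4 KiB NAPOT region), an access at paddr =
  // 0x8000_0040 satisfies unmaskEqual, since the addresses agree outside the mask.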

  def aligned(paddr: UInt, lgSize: UInt, lgMaxSize: Int, last: PMPEntry) = {
    if (lgMaxSize <= PlatformGrain) {
      true.B
    } else {
      val lowBitsMask = OneHot.UIntToOH1(lgSize, lgMaxSize)
      val lowerBound = ((paddr >> lgMaxSize) === (last.compare_addr >> lgMaxSize)) &&
        ((~paddr(lgMaxSize-1, 0) & last.compare_addr(lgMaxSize-1, 0)) =/= 0.U)
      val upperBound = ((paddr >> lgMaxSize) === (compare_addr >> lgMaxSize)) &&
        ((compare_addr(lgMaxSize-1, 0) & (paddr(lgMaxSize-1, 0) | lowBitsMask)) =/= 0.U)
      val torAligned = !(lowerBound || upperBound)
      val napotAligned = (lowBitsMask & ~mask(lgMaxSize-1, 0)) === 0.U
      Mux(cfg.na4_napot, napotAligned, torAligned)
    }
  }
}

/** PMPEntry for PMP copies outside the CSR unit,
  * with one more element, mask, to help NAPOT matching
  * TODO: make mask an element, not a method, for timing optimization
  */
class PMPEntry(implicit p: Parameters) extends PMPBase with PMPMatchMethod {
  val mask = UInt(PMPAddrBits.W) // helps matching in NAPOT mode

  def write_addr(next: PMPConfig, mask: UInt)(paddr: UInt) = {
    mask := Mux(!cfg.addr_locked(next), match_mask(paddr), mask)
    Mux(!cfg.addr_locked(next), paddr, addr)
  }

  def write_addr(mask: UInt)(paddr: UInt) = {
    mask := Mux(!cfg.addr_locked, match_mask(paddr), mask)
    Mux(!cfg.addr_locked, paddr, addr)
  }

  def gen(cfg: PMPConfig, addr: UInt, mask: UInt) = {
    require(addr.getWidth == this.addr.getWidth)
    this.cfg := cfg
    this.addr := addr
    this.mask := mask
  }
}

trait PMPMethod extends PMPConst {
  def pmp_init(): (Vec[UInt], Vec[UInt], Vec[UInt]) = {
    val cfg = WireInit(0.U.asTypeOf(Vec(NumPMP/8, UInt(PMXLEN.W))))
    // val addr = Wire(Vec(NumPMP, UInt((PMPAddrBits-PMPOffBits).W)))
    // val mask = Wire(Vec(NumPMP, UInt(PMPAddrBits.W)))
    // INFO: these CSRs could be uninitialized, but for difftesting with NEMU, we opt to initialize them.
    val addr = WireInit(0.U.asTypeOf(Vec(NumPMP, UInt((PMPAddrBits-PMPOffBits).W))))
    val mask = WireInit(0.U.asTypeOf(Vec(NumPMP, UInt(PMPAddrBits.W))))
    (cfg, addr, mask)
  }

  def pmp_gen_mapping
  (
    init: () => (Vec[UInt], Vec[UInt], Vec[UInt]),
    num: Int = 16,
    cfgBase: Int,
    addrBase: Int,
    entries: Vec[PMPEntry]
  ) = {
    val pmpCfgPerCSR = PMXLEN / new PMPConfig().getWidth
    def pmpCfgIndex(i: Int) = (PMXLEN / 32) * (i / pmpCfgPerCSR)
    val init_value = init()
    /** To fit MaskedRegMap's write interface, declare the cfgs as merged CSRs and split them into each PMP entry. */
    val cfgMerged = RegInit(init_value._1) // (Vec(num / pmpCfgPerCSR, UInt(PMXLEN.W))) // RegInit(VecInit(Seq.fill(num / pmpCfgPerCSR)(0.U(PMXLEN.W))))
    val cfgs = WireInit(cfgMerged).asTypeOf(Vec(num, new PMPConfig()))
    val addr = RegInit(init_value._2) // (Vec(num, UInt((PMPAddrBits-PMPOffBits).W)))
    val mask = RegInit(init_value._3) // (Vec(num, UInt(PMPAddrBits.W)))

    for (i <- entries.indices) {
      entries(i).gen(cfgs(i), addr(i), mask(i))
    }

    val cfg_mapping = (0 until num by pmpCfgPerCSR).map(i => {Map(
      MaskedRegMap(
        addr = cfgBase + pmpCfgIndex(i),
        reg = cfgMerged(i/pmpCfgPerCSR),
        wmask = WritableMask,
        wfn = new PMPBase().write_cfg_vec(mask, addr, i, cfgMerged(i/pmpCfgPerCSR))
      ))
    }).fold(Map())((a, b) => a ++ b) // ugly code; ping me if you have a better approach

    val addr_mapping = (0 until num).map(i => {Map(
      MaskedRegMap(
        addr = addrBase + i,
        reg = addr(i),
        wmask = WritableMask,
        wfn = { if (i != num-1) entries(i).write_addr(entries(i+1).cfg, mask(i)) else entries(i).write_addr(mask(i)) },
        rmask = WritableMask,
        rfn = new PMPBase().read_addr(entries(i).cfg)
      ))
    }).fold(Map())((a, b) => a ++ b) // ugly code; ping me if you have a better approach

    cfg_mapping ++ addr_mapping
  }
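
  // Worked example of the CSR index math above (assuming PMXLEN = 64 and 8-bit
  // configs): pmpCfgPerCSR = 8 and pmpCfgIndex(i) = 2 * (i / 8), so entries 0-7
  // live in pmpcfg0 and entries 8-15 in pmpcfg2 -- matching RV64, where the
  // odd-numbered pmpcfg CSRs do not exist.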
}

class PMP(implicit p: Parameters) extends PMPXSModule with HasXSParameter with PMPMethod with PMAMethod with HasCSRConst {
  val io = IO(new Bundle {
    val distribute_csr = Flipped(new DistributedCSRIO())
    val pmp = Output(Vec(NumPMP, new PMPEntry()))
    val pma = Output(Vec(NumPMA, new PMPEntry()))
  })

  val w = io.distribute_csr.w

  val pmp = Wire(Vec(NumPMP, new PMPEntry()))
  val pma = Wire(Vec(NumPMA, new PMPEntry()))

  val pmpMapping = pmp_gen_mapping(pmp_init, NumPMP, PmpcfgBase, PmpaddrBase, pmp)
  val pmaMapping = pmp_gen_mapping(pma_init, NumPMA, PmacfgBase, PmaaddrBase, pma)
  val mapping = pmpMapping ++ pmaMapping

  val rdata = Wire(UInt(PMXLEN.W))
  MaskedRegMap.generate(mapping, w.bits.addr, rdata, w.valid, w.bits.data)

  io.pmp := pmp
  io.pma := pma
}
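
// Instantiation sketch (hedged: the exact CSR wiring below is an assumption,
// not code from this file):
//   val pmp = Module(new PMP())
//   pmp.io.distribute_csr <> csrCtrl.distribute_csr // distributed CSR write channel
//   // io.pmp / io.pma then fan out to the TLB/PTW-side PMPCheckers.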

class PMPReqBundle(lgMaxSize: Int = 3)(implicit p: Parameters) extends PMPBundle {
  val addr = Output(UInt(PMPAddrBits.W))
  val size = Output(UInt(log2Ceil(lgMaxSize+1).W))
  val cmd = Output(TlbCmd())

  def apply(addr: UInt, size: UInt, cmd: UInt): Unit = {
    this.addr := addr
    this.size := size
    this.cmd := cmd
  }

  def apply(addr: UInt): Unit = { // request minimal permission and aligned size
    apply(addr, lgMaxSize.U, TlbCmd.read)
  }
}

class PMPRespBundle(implicit p: Parameters) extends PMPBundle {
  val ld = Output(Bool())
  val st = Output(Bool())
  val instr = Output(Bool())
  val mmio = Output(Bool())
  val atomic = Output(Bool())

  def |(resp: PMPRespBundle): PMPRespBundle = {
    val res = Wire(new PMPRespBundle())
    res.ld := this.ld || resp.ld
    res.st := this.st || resp.st
    res.instr := this.instr || resp.instr
    res.mmio := this.mmio || resp.mmio
    res.atomic := this.atomic || resp.atomic
    res
  }
}

trait PMPCheckMethod extends PMPConst {
  def pmp_check(cmd: UInt, cfg: PMPConfig) = {
    val resp = Wire(new PMPRespBundle)
    resp.ld := TlbCmd.isRead(cmd) && !TlbCmd.isAmo(cmd) && !cfg.r
    resp.st := (TlbCmd.isWrite(cmd) || TlbCmd.isAmo(cmd)) && !cfg.w
    resp.instr := TlbCmd.isExec(cmd) && !cfg.x
    resp.mmio := false.B
    resp.atomic := false.B
    resp
  }

  def pmp_match_res(leaveHitMux: Boolean = false, valid: Bool = true.B)(
    addr: UInt,
    size: UInt,
    pmpEntries: Vec[PMPEntry],
    mode: UInt,
    lgMaxSize: Int
  ) = {
    val num = pmpEntries.size
    require(num == NumPMP)

    val passThrough = if (pmpEntries.isEmpty) true.B else (mode > 1.U)
    val pmpDefault = WireInit(0.U.asTypeOf(new PMPEntry()))
    pmpDefault.cfg.r := passThrough
    pmpDefault.cfg.w := passThrough
    pmpDefault.cfg.x := passThrough

    val match_vec = Wire(Vec(num+1, Bool()))
    val cfg_vec = Wire(Vec(num+1, new PMPEntry()))

    pmpEntries.zip(pmpDefault +: pmpEntries.take(num-1)).zipWithIndex.foreach{ case ((pmp, last_pmp), i) =>
      val is_match = pmp.is_match(addr, size, lgMaxSize, last_pmp)
      val ignore = passThrough && !pmp.cfg.l
      val aligned = pmp.aligned(addr, size, lgMaxSize, last_pmp)

      val cur = WireInit(pmp)
      cur.cfg.r := aligned && (pmp.cfg.r || ignore)
      cur.cfg.w := aligned && (pmp.cfg.w || ignore)
      cur.cfg.x := aligned && (pmp.cfg.x || ignore)

      // Mux(is_match, cur, prev)
      match_vec(i) := is_match
      cfg_vec(i) := cur
    }

    // default value
    match_vec(num) := true.B
    cfg_vec(num) := pmpDefault

    if (leaveHitMux) {
      ParallelPriorityMux(match_vec.map(RegEnable(_, false.B, valid)), RegEnable(cfg_vec, valid))
    } else {
      ParallelPriorityMux(match_vec, cfg_vec)
    }
  }
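
  // Matching semantics, in brief: ParallelPriorityMux picks the lowest-numbered
  // matching entry, and the appended default entry (index num) always matches,
  // granting r/w/x only when mode > 1 (M-mode pass-through when nothing matches).
  // Locked entries are honored even in M-mode, since ignore = passThrough && !cfg.l.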
}

class PMPCheckerEnv(implicit p: Parameters) extends PMPBundle {
  val mode = UInt(2.W)
  val pmp = Vec(NumPMP, new PMPEntry())
  val pma = Vec(NumPMA, new PMPEntry())

  def apply(mode: UInt, pmp: Vec[PMPEntry], pma: Vec[PMPEntry]): Unit = {
    this.mode := mode
    this.pmp := pmp
    this.pma := pma
  }
}

class PMPCheckIO(lgMaxSize: Int)(implicit p: Parameters) extends PMPBundle {
  val check_env = Input(new PMPCheckerEnv())
  val req = Flipped(Valid(new PMPReqBundle(lgMaxSize))) // usage: drive valid with the fire signal
  val resp = new PMPRespBundle()

  def apply(mode: UInt, pmp: Vec[PMPEntry], pma: Vec[PMPEntry], req: Valid[PMPReqBundle]) = {
    check_env.apply(mode, pmp, pma)
    this.req := req
    resp
  }

  def req_apply(valid: Bool, addr: UInt): Unit = {
    this.req.valid := valid
    this.req.bits.apply(addr)
  }

  def apply(mode: UInt, pmp: Vec[PMPEntry], pma: Vec[PMPEntry], valid: Bool, addr: UInt) = {
    check_env.apply(mode, pmp, pma)
    req_apply(valid, addr)
    resp
  }
}

class PMPCheckv2IO(lgMaxSize: Int)(implicit p: Parameters) extends PMPBundle {
  val check_env = Input(new PMPCheckerEnv())
  val req = Flipped(Valid(new PMPReqBundle(lgMaxSize))) // usage: drive valid with the fire signal
  val resp = Output(new PMPConfig())

  def apply(mode: UInt, pmp: Vec[PMPEntry], pma: Vec[PMPEntry], req: Valid[PMPReqBundle]) = {
    check_env.apply(mode, pmp, pma)
    this.req := req
    resp
  }

  def req_apply(valid: Bool, addr: UInt): Unit = {
    this.req.valid := valid
    this.req.bits.apply(addr)
  }

  def apply(mode: UInt, pmp: Vec[PMPEntry], pma: Vec[PMPEntry], valid: Bool, addr: UInt) = {
    check_env.apply(mode, pmp, pma)
    req_apply(valid, addr)
    resp
  }
}

class PMPChecker
(
  lgMaxSize: Int = 3,
  sameCycle: Boolean = false,
  leaveHitMux: Boolean = false,
  pmpUsed: Boolean = true
)(implicit p: Parameters) extends PMPModule
  with PMPCheckMethod
  with PMACheckMethod
{
  require(!(leaveHitMux && sameCycle))
  val io = IO(new PMPCheckIO(lgMaxSize))

  val req = io.req.bits

  val res_pmp = pmp_match_res(leaveHitMux, io.req.valid)(req.addr, req.size, io.check_env.pmp, io.check_env.mode, lgMaxSize)
  val res_pma = pma_match_res(leaveHitMux, io.req.valid)(req.addr, req.size, io.check_env.pma, io.check_env.mode, lgMaxSize)

  val resp_pmp = pmp_check(req.cmd, res_pmp.cfg)
  val resp_pma = pma_check(req.cmd, res_pma.cfg)
  val resp = if (pmpUsed) (resp_pmp | resp_pma) else resp_pma

  if (sameCycle || leaveHitMux) {
    io.resp := resp
  } else {
    io.resp := RegEnable(resp, io.req.valid)
  }
}
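
// Usage sketch (hypothetical wiring; the parameter choices are illustrative):
//   val checker = Module(new PMPChecker(lgMaxSize = 3, sameCycle = true))
//   val resp = checker.io.apply(env.mode, env.pmp, env.pma, reqValid, reqPaddr)
// With sameCycle the response is combinational; otherwise io.resp is registered
// and reflects the request from the previous valid cycle.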

/* get the checked config (PMP and PMA combined) */
class PMPCheckerv2
(
  lgMaxSize: Int = 3,
  sameCycle: Boolean = false,
  leaveHitMux: Boolean = false
)(implicit p: Parameters) extends PMPModule
  with PMPCheckMethod
  with PMACheckMethod
{
  require(!(leaveHitMux && sameCycle))
  val io = IO(new PMPCheckv2IO(lgMaxSize))

  val req = io.req.bits

  val res_pmp = pmp_match_res(leaveHitMux, io.req.valid)(req.addr, req.size, io.check_env.pmp, io.check_env.mode, lgMaxSize)
  val res_pma = pma_match_res(leaveHitMux, io.req.valid)(req.addr, req.size, io.check_env.pma, io.check_env.mode, lgMaxSize)

  val resp = and(res_pmp, res_pma)

  if (sameCycle || leaveHitMux) {
    io.resp := resp
  } else {
    io.resp := RegEnable(resp, io.req.valid)
  }

  def and(pmp: PMPEntry, pma: PMPEntry): PMPConfig = {
    val tmp_res = Wire(new PMPConfig)
    tmp_res.l := DontCare
    tmp_res.a := DontCare
    tmp_res.r := pmp.cfg.r && pma.cfg.r
    tmp_res.w := pmp.cfg.w && pma.cfg.w
    tmp_res.x := pmp.cfg.x && pma.cfg.x
    tmp_res.c := pma.cfg.c
    tmp_res.atomic := pma.cfg.atomic
    tmp_res
  }
}